author     Heiko Carstens <heiko.carstens@de.ibm.com>   2008-04-30 07:38:47 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>  2008-04-30 07:38:48 -0400
commit     17f345808563d2f425b2b15d60c4a5b00112e9eb
tree       e12fe48f44c5d4d50cf1e92e679bc1badea0623a /arch/s390
parent     53492b1de46a7576170e865062ffcfc93bb5650b
[S390] Convert to SPARSEMEM & SPARSEMEM_VMEMMAP
Convert s390 to SPARSEMEM and SPARSEMEM_VMEMMAP. Although
SPARSEMEM_VMEMMAP is normally user-configurable, we force-select it:
SPARSEMEM without SPARSEMEM_VMEMMAP leaves us with a mess of broken
include dependencies that I don't want to fix.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
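
With SPARSEMEM_VMEMMAP the mem_map becomes a virtually contiguous array, so
the pfn/page conversions collapse into pointer arithmetic. As a sketch,
roughly the asm-generic/memory_model.h form of that era (vmemmap is the
architecture-provided base of the virtual map, VMEM_MAP in the s390 code
below):

	/* memmap is virtually contiguous under SPARSEMEM_VMEMMAP */
	#define __pfn_to_page(pfn)	(vmemmap + (pfn))
	#define __page_to_pfn(page)	((unsigned long)((page) - vmemmap))

This is what lets the patch delete the s390-private memmap_init() below:
holes in the physical address space no longer need special-casing against a
flat mem_map.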
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig      |  8
-rw-r--r--  arch/s390/mm/extmem.c  |  8
-rw-r--r--  arch/s390/mm/init.c    |  2
-rw-r--r--  arch/s390/mm/vmem.c    | 81
4 files changed, 20 insertions, 79 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8f5f02160ffc..29a7940f284f 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -300,6 +300,14 @@ comment "Kernel preemption"
 
 source "kernel/Kconfig.preempt"
 
+config ARCH_SPARSEMEM_ENABLE
+	def_bool y
+	select SPARSEMEM_VMEMMAP_ENABLE
+	select SPARSEMEM_VMEMMAP
+
+config ARCH_SPARSEMEM_DEFAULT
+	def_bool y
+
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index ed2af0a3303b..f231f5ec74b6 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -287,7 +287,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 	if (rc < 0)
 		goto out_free;
 
-	rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
 
 	if (rc)
 		goto out_free;
@@ -351,7 +351,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 	release_resource(seg->res);
 	kfree(seg->res);
  out_shared:
-	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
  out_free:
 	kfree(seg);
  out:
@@ -474,7 +474,7 @@ segment_modify_shared (char *name, int do_nonshared)
 		rc = 0;
 		goto out_unlock;
  out_del:
-	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
 	list_del(&seg->list);
 	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
 	kfree(seg);
@@ -508,7 +508,7 @@ segment_unload(char *name)
 		goto out_unlock;
 	release_resource(seg->res);
 	kfree(seg->res);
-	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+	vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
 	list_del(&seg->list);
 	dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
 	kfree(seg);
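
The extmem.c hunks are a pure rename to the new interface; arguments and
semantics are unchanged. A minimal illustration of the calling pattern,
assuming the struct dcss_segment fields used above (example_use_segment()
is a made-up name):

	static int example_use_segment(struct dcss_segment *seg)
	{
		int rc;

		/* wire the segment into the kernel 1:1 mapping */
		rc = vmem_add_mapping(seg->start_addr,
				      seg->end - seg->start_addr + 1);
		if (rc)
			return rc;
		/* ... access the segment contents ... */
		vmem_remove_mapping(seg->start_addr,
				    seg->end - seg->start_addr + 1);
		return 0;
	}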
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index acc92f46a096..fa31de6ae97a 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -106,6 +106,8 @@ void __init paging_init(void)
 	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
 	__raw_local_irq_ssm(ssm_mask);
 
+	sparse_memory_present_with_active_regions(MAX_NUMNODES);
+	sparse_init();
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 #ifdef CONFIG_ZONE_DMA
 	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
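
The two added lines follow the usual sparsemem bring-up order. A condensed
sketch of the sequence paging_init() now performs, assuming the 2.6.25-era
generic mm API (free_area_init_nodes() sits in the unshown tail of the
function):

	/* tell sparsemem which sections of the pfn range exist ... */
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	/* ... then allocate their mem_map sections; with SPARSEMEM_VMEMMAP
	 * this calls back into the new vmemmap_populate() in vmem.c */
	sparse_init();
	/* generic zone setup afterwards consumes max_zone_pfns */
	free_area_init_nodes(max_zone_pfns);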
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 97bce6c97574..beccacf907f3 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -27,43 +27,6 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
-			   unsigned long start_pfn)
-{
-	struct page *start, *end;
-	struct page *map_start, *map_end;
-	int i;
-
-	start = pfn_to_page(start_pfn);
-	end = start + size;
-
-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long cstart, cend;
-
-		cstart = PFN_DOWN(memory_chunk[i].addr);
-		cend = cstart + PFN_DOWN(memory_chunk[i].size);
-
-		map_start = mem_map + cstart;
-		map_end = mem_map + cend;
-
-		if (map_start < start)
-			map_start = start;
-		if (map_end > end)
-			map_end = end;
-
-		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
-			/ sizeof(struct page);
-		map_end += ((PFN_ALIGN((unsigned long) map_end)
-			- (unsigned long) map_end)
-			/ sizeof(struct page));
-
-		if (map_start < map_end)
-			memmap_init_zone((unsigned long)(map_end - map_start),
-				nid, zone, page_to_pfn(map_start),
-				MEMMAP_EARLY);
-	}
-}
-
 static void __ref *vmem_alloc_pages(unsigned int order)
 {
 	if (slab_is_available())
@@ -115,7 +78,7 @@ static pte_t __init_refok *vmem_pte_alloc(void)
 /*
  * Add a physical memory range to the 1:1 mapping.
  */
-static int vmem_add_range(unsigned long start, unsigned long size, int ro)
+static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 {
 	unsigned long address;
 	pgd_t *pg_dir;
@@ -209,10 +172,9 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 /*
  * Add a backed mem_map array to the virtual mem_map array.
  */
-static int vmem_add_mem_map(unsigned long start, unsigned long size)
+int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 {
 	unsigned long address, start_addr, end_addr;
-	struct page *map_start, *map_end;
 	pgd_t *pg_dir;
 	pud_t *pu_dir;
 	pmd_t *pm_dir;
@@ -220,11 +182,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
 	pte_t pte;
 	int ret = -ENOMEM;
 
-	map_start = VMEM_MAP + PFN_DOWN(start);
-	map_end = VMEM_MAP + PFN_DOWN(start + size);
-
-	start_addr = (unsigned long) map_start & PAGE_MASK;
-	end_addr = PFN_ALIGN((unsigned long) map_end);
+	start_addr = (unsigned long) start;
+	end_addr = (unsigned long) (start + nr);
 
 	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
 		pg_dir = pgd_offset_k(address);
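
vmemmap_populate() is the architecture hook that SPARSEMEM_VMEMMAP drives:
given a slice of the virtual mem_map, the architecture backs it with real
pages and page-table entries. Roughly how the generic mm/sparse-vmemmap.c
of this era calls it, one section at a time (simplified):

	struct page * __meminit sparse_mem_map_populate(unsigned long pnum,
							int nid)
	{
		struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);

		/* back one section's worth of struct page in the
		 * virtual mem_map */
		if (vmemmap_populate(map, PAGES_PER_SECTION, nid))
			return NULL;
		return map;
	}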
@@ -268,16 +227,6 @@ out:
 	return ret;
 }
 
-static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
-{
-	int ret;
-
-	ret = vmem_add_mem_map(start, size);
-	if (ret)
-		return ret;
-	return vmem_add_range(start, size, ro);
-}
-
 /*
  * Add memory segment to the segment list if it doesn't overlap with
  * an already present segment.
@@ -315,7 +264,7 @@ static void __remove_shared_memory(struct memory_segment *seg)
 	vmem_remove_range(seg->start, seg->size);
 }
 
-int remove_shared_memory(unsigned long start, unsigned long size)
+int vmem_remove_mapping(unsigned long start, unsigned long size)
 {
 	struct memory_segment *seg;
 	int ret;
@@ -339,11 +288,9 @@ out:
 	return ret;
 }
 
-int add_shared_memory(unsigned long start, unsigned long size)
+int vmem_add_mapping(unsigned long start, unsigned long size)
 {
 	struct memory_segment *seg;
-	struct page *page;
-	unsigned long pfn, num_pfn, end_pfn;
 	int ret;
 
 	mutex_lock(&vmem_mutex);
@@ -361,21 +308,6 @@ int add_shared_memory(unsigned long start, unsigned long size)
 	ret = vmem_add_mem(start, size, 0);
 	if (ret)
 		goto out_remove;
-
-	pfn = PFN_DOWN(start);
-	num_pfn = PFN_DOWN(size);
-	end_pfn = pfn + num_pfn;
-
-	page = pfn_to_page(pfn);
-	memset(page, 0, num_pfn * sizeof(struct page));
-
-	for (; pfn < end_pfn; pfn++) {
-		page = pfn_to_page(pfn);
-		init_page_count(page);
-		reset_page_mapcount(page);
-		SetPageReserved(page);
-		INIT_LIST_HEAD(&page->lru);
-	}
 	goto out;
 
 out_remove:
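
The hand-rolled struct page setup can go, presumably because under
SPARSEMEM the generic memmap code owns that initialization. For comparison,
the per-page work in memmap_init_zone() of this era looks roughly like this
(simplified from mm/page_alloc.c; the set_page_links() bookkeeping is
omitted):

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_page(pfn);

		init_page_count(page);
		reset_page_mapcount(page);
		SetPageReserved(page);
		INIT_LIST_HEAD(&page->lru);
	}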
@@ -401,7 +333,6 @@ void __init vmem_map_init(void)
 	INIT_LIST_HEAD(&init_mm.context.crst_list);
 	INIT_LIST_HEAD(&init_mm.context.pgtable_list);
 	init_mm.context.noexec = 0;
-	NODE_DATA(0)->node_mem_map = VMEM_MAP;
 	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
 	ro_end = PFN_ALIGN((unsigned long)&_eshared);
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {