author	Heiko Carstens <heiko.carstens@de.ibm.com>	2006-11-06 04:49:00 -0500
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2006-11-06 04:49:00 -0500
commit	bcc8bcb1f0cc51c0042497d5de2d79743050e3bb (patch)
tree	b60d66fb534589a2770f69265ccc768977711993 /arch/s390/kernel
parent	d1ed6a3ea10aa7b199c434f6ffd1b6761896567a (diff)
[S390] revert add_active_range() usage patch.
Commit 7676bef9c183fd573822cac9992927ef596d584c breaks DCSS support on
s390. DCSS needs initialized struct pages to work. With add_active_range()
in use, only the struct pages for physically present pages are initialized.
This could be fixed if the DCSS driver initialized the struct pages itself,
but that doesn't work either: the mem_map array does not include holes after
the last present memory area, so there is nothing there that could be
initialized.
To fix this and to avoid dirty hacks, revert the patch for now. It will be
added again when we move to a virtual mem_map.
Cc: Carsten Otte <cotte@de.ibm.com>
Cc: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
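
For reference, the heart of the restored code is add_memory_hole(), which accumulates ranges of absent page frames into zholes_size[] and splits a range that straddles the DMA boundary between ZONE_DMA and ZONE_NORMAL. The standalone userspace sketch below mirrors that arithmetic from the diff; the dma_pfn value and the sample hole ranges are invented for illustration only.

	/*
	 * Userspace sketch of the zone-hole accounting restored by this
	 * revert. The DMA boundary and the example hole ranges are assumed
	 * values, not taken from the patch.
	 */
	#include <stdio.h>

	#define ZONE_DMA	0
	#define ZONE_NORMAL	1

	static unsigned long zholes_size[2];
	static unsigned long dma_pfn = 0x80000;	/* assumed: 2 GiB boundary, 4 KiB pages */

	/*
	 * Same logic as add_memory_hole() in the diff below: a hole entirely
	 * below the DMA boundary is charged to ZONE_DMA, one entirely above
	 * it to ZONE_NORMAL, and a hole spanning the boundary is split.
	 */
	static void add_memory_hole(unsigned long start, unsigned long end)
	{
		if (end <= dma_pfn)
			zholes_size[ZONE_DMA] += end - start + 1;
		else if (start > dma_pfn)
			zholes_size[ZONE_NORMAL] += end - start + 1;
		else {
			zholes_size[ZONE_DMA] += dma_pfn - start + 1;
			zholes_size[ZONE_NORMAL] += end - dma_pfn;
		}
	}

	int main(void)
	{
		add_memory_hole(0x40000, 0x4ffff);	/* below the boundary */
		add_memory_hole(0x7fff0, 0x8000f);	/* straddles the boundary */
		add_memory_hole(0x90000, 0x9ffff);	/* above the boundary */
		printf("zholes DMA:    %lu pages\n", zholes_size[ZONE_DMA]);
		printf("zholes NORMAL: %lu pages\n", zholes_size[ZONE_NORMAL]);
		return 0;
	}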
Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--	arch/s390/kernel/setup.c	53
1 file changed, 41 insertions(+), 12 deletions(-)
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 49f2b68e32b1..a31abddf115b 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -70,6 +70,7 @@ struct {
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY 1
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
+unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;
 
 /*
@@ -357,6 +358,21 @@ void machine_power_off(void)
  */
 void (*pm_power_off)(void) = machine_power_off;
 
+static void __init
+add_memory_hole(unsigned long start, unsigned long end)
+{
+	unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
+
+	if (end <= dma_pfn)
+		zholes_size[ZONE_DMA] += end - start + 1;
+	else if (start > dma_pfn)
+		zholes_size[ZONE_NORMAL] += end - start + 1;
+	else {
+		zholes_size[ZONE_DMA] += dma_pfn - start + 1;
+		zholes_size[ZONE_NORMAL] += end - dma_pfn;
+	}
+}
+
 static int __init early_parse_mem(char *p)
 {
 	memory_end = memparse(p, &p);
@@ -478,6 +494,7 @@ setup_memory(void)
 {
 	unsigned long bootmap_size;
 	unsigned long start_pfn, end_pfn, init_pfn;
+	unsigned long last_rw_end;
 	int i;
 
 	/*
@@ -533,27 +550,39 @@ setup_memory(void)
 	/*
 	 * Register RAM areas with the bootmem allocator.
 	 */
+	last_rw_end = start_pfn;
 
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long start_chunk, end_chunk, pfn;
+		unsigned long start_chunk, end_chunk;
 
 		if (memory_chunk[i].type != CHUNK_READ_WRITE)
 			continue;
-		start_chunk = PFN_DOWN(memory_chunk[i].addr);
-		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
-		end_chunk = min(end_chunk, end_pfn);
-		if (start_chunk >= end_chunk)
-			continue;
-		add_active_range(0, start_chunk, end_chunk);
-		pfn = max(start_chunk, start_pfn);
-		for (; pfn <= end_chunk; pfn++)
-			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
+		start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
+		start_chunk >>= PAGE_SHIFT;
+		end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
+		end_chunk >>= PAGE_SHIFT;
+		if (start_chunk < start_pfn)
+			start_chunk = start_pfn;
+		if (end_chunk > end_pfn)
+			end_chunk = end_pfn;
+		if (start_chunk < end_chunk) {
+			/* Initialize storage key for RAM pages */
+			for (init_pfn = start_chunk ; init_pfn < end_chunk;
+			     init_pfn++)
+				page_set_storage_key(init_pfn << PAGE_SHIFT,
+						     PAGE_DEFAULT_KEY);
+			free_bootmem(start_chunk << PAGE_SHIFT,
+				     (end_chunk - start_chunk) << PAGE_SHIFT);
+			if (last_rw_end < start_chunk)
+				add_memory_hole(last_rw_end, start_chunk - 1);
+			last_rw_end = end_chunk;
+		}
 	}
 
 	psw_set_key(PAGE_DEFAULT_KEY);
 
-	free_bootmem_with_active_regions(0, max_pfn);
-	reserve_bootmem(0, PFN_PHYS(start_pfn));
+	if (last_rw_end < end_pfn - 1)
+		add_memory_hole(last_rw_end, end_pfn - 1);
 
 	/*
 	 * Reserve the bootmem bitmap itself as well. We do this in two
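
To make the restored chunk walk in setup_memory() easier to follow, here is a standalone userspace sketch of the same arithmetic: round each read/write chunk's start up and its end down to whole page frames, clamp to the usable PFN window, and report any gap since the previous chunk as a memory hole. The chunk table and the start_pfn/end_pfn values are made-up sample data, and the storage-key and free_bootmem() steps are reduced to a comment.

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)

	struct chunk { unsigned long addr, size; };

	int main(void)
	{
		/* assumed layout: two RAM chunks with a gap in between */
		struct chunk memory_chunk[] = {
			{ 0x00000000UL, 0x10000000UL },	/* 256 MiB at 0 */
			{ 0x20000000UL, 0x10000000UL },	/* 256 MiB at 512 MiB */
		};
		unsigned long start_pfn = 0x100;	/* assumed first usable PFN */
		unsigned long end_pfn = 0x30000;	/* assumed end of usable memory */
		unsigned long last_rw_end = start_pfn;
		unsigned long i;

		for (i = 0; i < sizeof(memory_chunk) / sizeof(memory_chunk[0]); i++) {
			unsigned long start_chunk, end_chunk;

			/* round the chunk start up and the end down to page frames */
			start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
			end_chunk = (memory_chunk[i].addr + memory_chunk[i].size) >> PAGE_SHIFT;

			/* clamp to the usable PFN window */
			if (start_chunk < start_pfn)
				start_chunk = start_pfn;
			if (end_chunk > end_pfn)
				end_chunk = end_pfn;
			if (start_chunk >= end_chunk)
				continue;

			/* the kernel sets storage keys and calls free_bootmem() here */
			if (last_rw_end < start_chunk)
				printf("hole: pfn 0x%lx .. 0x%lx\n",
				       last_rw_end, start_chunk - 1);
			printf("ram:  pfn 0x%lx .. 0x%lx\n",
			       start_chunk, end_chunk - 1);
			last_rw_end = end_chunk;
		}
		if (last_rw_end < end_pfn - 1)
			printf("hole: pfn 0x%lx .. 0x%lx\n",
			       last_rw_end, end_pfn - 1);
		return 0;
	}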