 arch/s390/Kconfig        |  3 ---
 arch/s390/defconfig      |  1 -
 arch/s390/kernel/setup.c | 53 ++++++++++++++++++++++++++++++++-----------
 arch/s390/mm/init.c      | 32 +++++++++++++++++--------
 4 files changed, 63 insertions(+), 26 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 608193cfe43f..245b81bc7157 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -236,9 +236,6 @@ config WARN_STACK_SIZE
 	  This allows you to specify the maximum frame size a function may
 	  have without the compiler complaining about it.
 
-config ARCH_POPULATES_NODE_MAP
-	def_bool y
-
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index c313e9a9304f..7cd51e73e274 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -119,7 +119,6 @@ CONFIG_PACK_STACK=y
 CONFIG_CHECK_STACK=y
 CONFIG_STACK_GUARD=256
 # CONFIG_WARN_STACK is not set
-CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 49f2b68e32b1..a31abddf115b 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -70,6 +70,7 @@ struct {
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY 1
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
+unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;
 
 /*
@@ -357,6 +358,21 @@ void machine_power_off(void)
  */
 void (*pm_power_off)(void) = machine_power_off;
 
+static void __init
+add_memory_hole(unsigned long start, unsigned long end)
+{
+	unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
+
+	if (end <= dma_pfn)
+		zholes_size[ZONE_DMA] += end - start + 1;
+	else if (start > dma_pfn)
+		zholes_size[ZONE_NORMAL] += end - start + 1;
+	else {
+		zholes_size[ZONE_DMA] += dma_pfn - start + 1;
+		zholes_size[ZONE_NORMAL] += end - dma_pfn;
+	}
+}
+
 static int __init early_parse_mem(char *p)
 {
 	memory_end = memparse(p, &p);
@@ -478,6 +494,7 @@ setup_memory(void)
 {
 	unsigned long bootmap_size;
 	unsigned long start_pfn, end_pfn, init_pfn;
+	unsigned long last_rw_end;
 	int i;
 
 	/*
@@ -533,27 +550,39 @@ setup_memory(void)
 	/*
 	 * Register RAM areas with the bootmem allocator.
 	 */
+	last_rw_end = start_pfn;
 
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long start_chunk, end_chunk, pfn;
+		unsigned long start_chunk, end_chunk;
 
 		if (memory_chunk[i].type != CHUNK_READ_WRITE)
 			continue;
-		start_chunk = PFN_DOWN(memory_chunk[i].addr);
-		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
-		end_chunk = min(end_chunk, end_pfn);
-		if (start_chunk >= end_chunk)
-			continue;
-		add_active_range(0, start_chunk, end_chunk);
-		pfn = max(start_chunk, start_pfn);
-		for (; pfn <= end_chunk; pfn++)
-			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
+		start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
+		start_chunk >>= PAGE_SHIFT;
+		end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
+		end_chunk >>= PAGE_SHIFT;
+		if (start_chunk < start_pfn)
+			start_chunk = start_pfn;
+		if (end_chunk > end_pfn)
+			end_chunk = end_pfn;
+		if (start_chunk < end_chunk) {
+			/* Initialize storage key for RAM pages */
+			for (init_pfn = start_chunk ; init_pfn < end_chunk;
+			     init_pfn++)
+				page_set_storage_key(init_pfn << PAGE_SHIFT,
+						     PAGE_DEFAULT_KEY);
+			free_bootmem(start_chunk << PAGE_SHIFT,
+				     (end_chunk - start_chunk) << PAGE_SHIFT);
+			if (last_rw_end < start_chunk)
+				add_memory_hole(last_rw_end, start_chunk - 1);
+			last_rw_end = end_chunk;
+		}
 	}
 
 	psw_set_key(PAGE_DEFAULT_KEY);
 
-	free_bootmem_with_active_regions(0, max_pfn);
-	reserve_bootmem(0, PFN_PHYS(start_pfn));
+	if (last_rw_end < end_pfn - 1)
+		add_memory_hole(last_rw_end, end_pfn - 1);
 
 	/*
 	 * Reserve the bootmem bitmap itself as well. We do this in two
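The core of the setup.c change is the hole accounting: whenever a gap appears between read/write memory chunks, add_memory_hole() charges the missing pages to the zone(s) the gap falls into. Below is a minimal user-space sketch of that arithmetic, not part of the patch; DMA_PFN stands in for MAX_DMA_ADDRESS >> PAGE_SHIFT and the sample pfn range is invented.

#include <stdio.h>

enum { ZONE_DMA, ZONE_NORMAL, MAX_NR_ZONES };

#define DMA_PFN 0x80000UL	/* assumed 2 GB boundary with 4 KB pages, for illustration only */

static unsigned long zholes_size[MAX_NR_ZONES];

/* Charge the hole spanning pfns [start, end] to the zone(s) it falls in,
 * mirroring the logic of the add_memory_hole() restored by this patch. */
static void add_memory_hole(unsigned long start, unsigned long end)
{
	if (end <= DMA_PFN)
		zholes_size[ZONE_DMA] += end - start + 1;
	else if (start > DMA_PFN)
		zholes_size[ZONE_NORMAL] += end - start + 1;
	else {
		zholes_size[ZONE_DMA] += DMA_PFN - start + 1;
		zholes_size[ZONE_NORMAL] += end - DMA_PFN;
	}
}

int main(void)
{
	/* A hole straddling the DMA boundary is split between both zones. */
	add_memory_hole(0x7ff00, 0x80200);
	printf("ZONE_DMA holes: %lu pages, ZONE_NORMAL holes: %lu pages\n",
	       zholes_size[ZONE_DMA], zholes_size[ZONE_NORMAL]);
	return 0;
}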
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index d99891718709..e1881c31b1cb 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -84,6 +84,7 @@ void show_mem(void)
 	printk("%d pages swap cached\n",cached);
 }
 
+extern unsigned long __initdata zholes_size[];
 /*
  * paging_init() sets up the page tables
  */
@@ -100,15 +101,16 @@ void __init paging_init(void)
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
 	static const int ssm_mask = 0x04000000L;
 	unsigned long ro_start_pfn, ro_end_pfn;
-	unsigned long max_zone_pfns[MAX_NR_ZONES];
+	unsigned long zones_size[MAX_NR_ZONES];
 
 	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
 	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-	max_zone_pfns[ZONE_DMA] = max_low_pfn;
-	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-	free_area_init_nodes(max_zone_pfns);
+	memset(zones_size, 0, sizeof(zones_size));
+	zones_size[ZONE_DMA] = max_low_pfn;
+	free_area_init_node(0, &contig_page_data, zones_size,
+			    __pa(PAGE_OFFSET) >> PAGE_SHIFT,
+			    zholes_size);
 
 	/* unmap whole virtual address space */
 
@@ -168,16 +170,26 @@ void __init paging_init(void)
 	unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
 		_KERN_REGION_TABLE;
 	static const int ssm_mask = 0x04000000L;
+	unsigned long zones_size[MAX_NR_ZONES];
+	unsigned long dma_pfn, high_pfn;
 	unsigned long ro_start_pfn, ro_end_pfn;
-	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
+	memset(zones_size, 0, sizeof(zones_size));
+	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
+	high_pfn = max_low_pfn;
 	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
 	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
-	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-	free_area_init_nodes(max_zone_pfns);
+	if (dma_pfn > high_pfn)
+		zones_size[ZONE_DMA] = high_pfn;
+	else {
+		zones_size[ZONE_DMA] = dma_pfn;
+		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
+	}
+
+	/* Initialize mem_map[]. */
+	free_area_init_node(0, &contig_page_data, zones_size,
+			    __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
 
 	/*
 	 * map whole physical memory to virtual memory (identity mapping)
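For the 64-bit paging_init() above, the restored zone sizing splits max_low_pfn at the DMA boundary before handing zones_size[] and zholes_size[] to free_area_init_node(). A small stand-alone sketch of that split follows, not part of the patch; the pfn values are invented for illustration.

#include <stdio.h>

enum { ZONE_DMA, ZONE_NORMAL, MAX_NR_ZONES };

int main(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
	unsigned long dma_pfn = 0x80000;   /* assumed MAX_DMA_ADDRESS >> PAGE_SHIFT */
	unsigned long high_pfn = 0x100000; /* assumed max_low_pfn, i.e. 4 GB of RAM */

	if (dma_pfn > high_pfn)
		zones_size[ZONE_DMA] = high_pfn;	/* all memory fits below the DMA limit */
	else {
		zones_size[ZONE_DMA] = dma_pfn;
		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
	}

	printf("ZONE_DMA: %lu pages, ZONE_NORMAL: %lu pages\n",
	       zones_size[ZONE_DMA], zones_size[ZONE_NORMAL]);
	/* In the kernel these sizes, together with zholes_size[], are passed to
	 * free_area_init_node(0, &contig_page_data, zones_size, ...). */
	return 0;
}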
