author     Linus Torvalds <torvalds@linux-foundation.org>	2013-11-13 01:45:43 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>	2013-11-13 01:45:43 -0500
commit     5cbb3d216e2041700231bcfc383ee5f8b7fc8b74 (patch)
tree       a738fa82dbcefa9bd283c08bc67f38827be63937 /arch/x86/mm
parent     9bc9ccd7db1c9f043f75380b5a5b94912046a60e (diff)
parent     4e9b45a19241354daec281d7a785739829b52359 (diff)
Merge branch 'akpm' (patches from Andrew Morton)
Merge first patch-bomb from Andrew Morton:
"Quite a lot of other stuff is banked up awaiting further
next->mainline merging, but this batch contains:
- Lots of random misc patches
- OCFS2
- Most of MM
- backlight updates
- lib/ updates
- printk updates
- checkpatch updates
- epoll tweaking
- rtc updates
- hfs
- hfsplus
- documentation
- procfs
- update gcov to gcc-4.7 format
- IPC"
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (269 commits)
ipc, msg: fix message length check for negative values
ipc/util.c: remove unnecessary work pending test
devpts: plug the memory leak in kill_sb
./Makefile: export initial ramdisk compression config option
init/Kconfig: add option to disable kernel compression
drivers: w1: make w1_slave::flags long to avoid memory corruption
drivers/w1/masters/ds1wm.c: use dev_get_platdata()
drivers/memstick/core/ms_block.c: fix unreachable state in h_msb_read_page()
drivers/memstick/core/mspro_block.c: fix attributes array allocation
drivers/pps/clients/pps-gpio.c: remove redundant of_match_ptr
kernel/panic.c: reduce 1 byte usage for print tainted buffer
gcov: reuse kbasename helper
kernel/gcov/fs.c: use pr_warn()
kernel/module.c: use pr_foo()
gcov: compile specific gcov implementation based on gcc version
gcov: add support for gcc 4.7 gcov format
gcov: move gcov structs definitions to a gcc version specific file
kernel/taskstats.c: return -ENOMEM when alloc memory fails in add_del_listener()
kernel/taskstats.c: add nla_nest_cancel() for failure processing between nla_nest_start() and nla_nest_end()
kernel/sysctl_binary.c: use scnprintf() instead of snprintf()
...
Diffstat (limited to 'arch/x86/mm')
 -rw-r--r--  arch/x86/mm/init.c  125
 -rw-r--r--  arch/x86/mm/numa.c   11
2 files changed, 114 insertions(+), 22 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ce32017c5e38..f97130618113 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -53,12 +53,12 @@ __ref void *alloc_low_pages(unsigned int num)
 	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
 		unsigned long ret;
 		if (min_pfn_mapped >= max_pfn_mapped)
-			panic("alloc_low_page: ran out of memory");
+			panic("alloc_low_pages: ran out of memory");
 		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
 					max_pfn_mapped << PAGE_SHIFT,
 					PAGE_SIZE * num , PAGE_SIZE);
 		if (!ret)
-			panic("alloc_low_page: can not alloc memory");
+			panic("alloc_low_pages: can not alloc memory");
 		memblock_reserve(ret, PAGE_SIZE * num);
 		pfn = ret >> PAGE_SHIFT;
 	} else {
@@ -418,27 +418,27 @@ static unsigned long __init get_new_step_size(unsigned long step_size)
 	return step_size << 5;
 }
 
-void __init init_mem_mapping(void)
+/**
+ * memory_map_top_down - Map [map_start, map_end) top down
+ * @map_start: start address of the target memory range
+ * @map_end: end address of the target memory range
+ *
+ * This function will setup direct mapping for memory range
+ * [map_start, map_end) in top-down. That said, the page tables
+ * will be allocated at the end of the memory, and we map the
+ * memory in top-down.
+ */
+static void __init memory_map_top_down(unsigned long map_start,
+				       unsigned long map_end)
 {
-	unsigned long end, real_end, start, last_start;
+	unsigned long real_end, start, last_start;
 	unsigned long step_size;
 	unsigned long addr;
 	unsigned long mapped_ram_size = 0;
 	unsigned long new_mapped_ram_size;
 
-	probe_page_size_mask();
-
-#ifdef CONFIG_X86_64
-	end = max_pfn << PAGE_SHIFT;
-#else
-	end = max_low_pfn << PAGE_SHIFT;
-#endif
-
-	/* the ISA range is always mapped regardless of memory holes */
-	init_memory_mapping(0, ISA_END_ADDRESS);
-
 	/* xen has big range in reserved near end of ram, skip it at first.*/
-	addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE);
+	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
 	real_end = addr + PMD_SIZE;
 
 	/* step_size need to be small so pgt_buf from BRK could cover it */
@@ -453,13 +453,13 @@ void __init init_mem_mapping(void)
 	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
 	 * for page table.
 	 */
-	while (last_start > ISA_END_ADDRESS) {
+	while (last_start > map_start) {
 		if (last_start > step_size) {
 			start = round_down(last_start - 1, step_size);
-			if (start < ISA_END_ADDRESS)
-				start = ISA_END_ADDRESS;
+			if (start < map_start)
+				start = map_start;
 		} else
-			start = ISA_END_ADDRESS;
+			start = map_start;
 		new_mapped_ram_size = init_range_memory_mapping(start,
 							last_start);
 		last_start = start;
@@ -470,8 +470,89 @@ void __init init_mem_mapping(void)
 		mapped_ram_size += new_mapped_ram_size;
 	}
 
-	if (real_end < end)
-		init_range_memory_mapping(real_end, end);
+	if (real_end < map_end)
+		init_range_memory_mapping(real_end, map_end);
+}
+
+/**
+ * memory_map_bottom_up - Map [map_start, map_end) bottom up
+ * @map_start: start address of the target memory range
+ * @map_end: end address of the target memory range
+ *
+ * This function will setup direct mapping for memory range
+ * [map_start, map_end) in bottom-up. Since we have limited the
+ * bottom-up allocation above the kernel, the page tables will
+ * be allocated just above the kernel and we map the memory
+ * in [map_start, map_end) in bottom-up.
+ */
+static void __init memory_map_bottom_up(unsigned long map_start,
+					unsigned long map_end)
+{
+	unsigned long next, new_mapped_ram_size, start;
+	unsigned long mapped_ram_size = 0;
+	/* step_size need to be small so pgt_buf from BRK could cover it */
+	unsigned long step_size = PMD_SIZE;
+
+	start = map_start;
+	min_pfn_mapped = start >> PAGE_SHIFT;
+
+	/*
+	 * We start from the bottom (@map_start) and go to the top (@map_end).
+	 * The memblock_find_in_range() gets us a block of RAM from the
+	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
+	 * for page table.
+	 */
+	while (start < map_end) {
+		if (map_end - start > step_size) {
+			next = round_up(start + 1, step_size);
+			if (next > map_end)
+				next = map_end;
+		} else
+			next = map_end;
+
+		new_mapped_ram_size = init_range_memory_mapping(start, next);
+		start = next;
+
+		if (new_mapped_ram_size > mapped_ram_size)
+			step_size = get_new_step_size(step_size);
+		mapped_ram_size += new_mapped_ram_size;
+	}
+}
+
+void __init init_mem_mapping(void)
+{
+	unsigned long end;
+
+	probe_page_size_mask();
+
+#ifdef CONFIG_X86_64
+	end = max_pfn << PAGE_SHIFT;
+#else
+	end = max_low_pfn << PAGE_SHIFT;
+#endif
+
+	/* the ISA range is always mapped regardless of memory holes */
+	init_memory_mapping(0, ISA_END_ADDRESS);
+
+	/*
+	 * If the allocation is in bottom-up direction, we setup direct mapping
+	 * in bottom-up, otherwise we setup direct mapping in top-down.
+	 */
+	if (memblock_bottom_up()) {
+		unsigned long kernel_end = __pa_symbol(_end);
+
+		/*
+		 * we need two separate calls here. This is because we want to
+		 * allocate page tables above the kernel. So we first map
+		 * [kernel_end, end) to make memory above the kernel be mapped
+		 * as soon as possible. And then use page tables allocated above
+		 * the kernel to map [ISA_END_ADDRESS, kernel_end).
+		 */
+		memory_map_bottom_up(kernel_end, end);
+		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
+	} else {
+		memory_map_top_down(ISA_END_ADDRESS, end);
+	}
 
 #ifdef CONFIG_X86_64
 	if (max_pfn > max_low_pfn) {
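
Both mappers above share one mechanism: the range is walked in windows that start at PMD_SIZE, so the first round of page tables still fits in the BRK-backed pgt_buf, and that widen via get_new_step_size() once freshly mapped RAM becomes available to allocate page tables from. The standalone C sketch below simulates only that window arithmetic; the names, sample addresses, and unconditional step growth are illustrative (the kernel grows the step only when a round mapped more RAM than the running total), not kernel code.

#include <stdio.h>

#define PMD_SIZE	(2UL << 20)	/* 2 MiB, as on x86 */

/* round up to a power-of-two alignment, as the kernel's macro does */
static unsigned long round_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

/* mirrors get_new_step_size() in this patch: widen the window 32x */
static unsigned long new_step(unsigned long step)
{
	return step << 5;
}

int main(void)
{
	unsigned long start = 16UL << 20;	/* pretend kernel_end: 16 MiB */
	unsigned long end = 1UL << 30;		/* pretend top of RAM: 1 GiB */
	unsigned long step = PMD_SIZE;
	unsigned long next;

	while (start < end) {
		if (end - start > step) {
			/* same boundary arithmetic as memory_map_bottom_up() */
			next = round_up(start + 1, step);
			if (next > end)
				next = end;
		} else
			next = end;

		printf("map [%#lx, %#lx)\n", start, next);
		start = next;
		step = new_step(step);	/* simplified: grow every round */
	}
	return 0;
}

Compiled and run, it prints one 2 MiB window followed by sharply widening ones, which is why only the first step has to fit in the BRK buffer.
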
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 8bf93bae1f13..24aec58d6afd 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -567,6 +567,17 @@ static int __init numa_init(int (*init_func)(void))
 	ret = init_func();
 	if (ret < 0)
 		return ret;
+
+	/*
+	 * We reset memblock back to the top-down direction
+	 * here because if we configured ACPI_NUMA, we have
+	 * parsed SRAT in init_func(). It is ok to have the
+	 * reset here even if we did't configure ACPI_NUMA
+	 * or acpi numa init fails and fallbacks to dummy
+	 * numa init.
+	 */
+	memblock_set_bottom_up(false);
+
 	ret = numa_cleanup_meminfo(&numa_meminfo);
 	if (ret < 0)
 		return ret;
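
For context, memblock_set_bottom_up() and memblock_bottom_up() are introduced earlier in this same series as a boolean direction flag on the global memblock descriptor. The toy program below models only the protocol this hunk establishes: SRAT parsing may run while bottom-up allocation is in force, and numa_init() flips the flag back once init_func() has succeeded. Every name in it (set_bottom_up, fake_srat_init, ...) is illustrative, not kernel API.

#include <stdbool.h>
#include <stdio.h>

static bool bottom_up;	/* stands in for the kernel's memblock direction flag */

static void set_bottom_up(bool enable) { bottom_up = enable; }
static bool is_bottom_up(void) { return bottom_up; }

/* pretend SRAT parsing ran while bottom-up allocation was enabled */
static int fake_srat_init(void)
{
	return 0;	/* return -1 to model the fallback to dummy NUMA init */
}

int main(void)
{
	set_bottom_up(true);	/* as a movable_node-style setup would */

	if (fake_srat_init() == 0)
		set_bottom_up(false);	/* the reset this hunk adds */

	printf("bottom_up now: %d\n", is_bottom_up());
	return 0;
}
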