author:    Tang Chen <tangchen@cn.fujitsu.com>           2014-01-21 18:49:35 -0500
committer: Linus Torvalds <torvalds@linux-foundation.org>  2014-01-21 19:19:45 -0500
commit:    55ac590c2fadad785d60dd70c12d62823bc2cd39
tree:      0b373b4943087706f8e0bee973d61d0bf24687df /mm/memblock.c
parent:    a0acda917284183f9b71e2d08b0aa0aea722b321
memblock, mem_hotplug: make memblock skip hotpluggable regions if needed
The Linux kernel cannot migrate pages that it uses itself. As a result,
hotpluggable memory used by the kernel cannot be hot-removed.
To solve this problem, the basic idea is to prevent memblock from
allocating hotpluggable memory for the kernel early during boot, and to
arrange all hotpluggable memory described in the ACPI SRAT (System Resource
Affinity Table) as ZONE_MOVABLE when initializing zones.
In the previous patches, we marked hotpluggable memory regions in
memblock.memory with the MEMBLOCK_HOTPLUG flag.
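Those earlier patches are not part of this diff, but for context the interface they added to include/linux/memblock.h looks roughly like the sketch below; the flag value and exact prototypes are reconstructed from memory and should be treated as approximate rather than authoritative.

```c
/*
 * Approximate sketch of the MEMBLOCK_HOTPLUG interface introduced by the
 * earlier patches in this series (lives in include/linux/memblock.h, so the
 * surrounding header supplies phys_addr_t and struct memblock_region).
 * Details are reconstructed and may differ from the upstream header.
 */
#define MEMBLOCK_HOTPLUG	0x1	/* region came from a hotpluggable range */

int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);

/* Test whether a memblock region was tagged as hotpluggable. */
static inline bool memblock_is_hotpluggable(struct memblock_region *m)
{
	return m->flags & MEMBLOCK_HOTPLUG;
}
```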
In this patch, we make memblock skip these hotpluggable memory regions
in the default top-down allocation function when the movable_node boot
option is specified.
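The skip added below is gated on movable_node_is_enabled(). That helper is defined in include/linux/memblock.h rather than mm/memblock.c, so it does not appear in this diffstat-limited view; a hedged sketch of its expected shape:

```c
/*
 * Hedged sketch of the gate used by the allocator below. The real helper
 * belongs to include/linux/memblock.h in this series and may differ in
 * detail; the key point is that it reads movable_node_enabled, which is
 * defined in mm/memblock.c by this patch.
 */
#ifdef CONFIG_MOVABLE_NODE
extern bool movable_node_enabled;
static inline bool movable_node_is_enabled(void)
{
	return movable_node_enabled;
}
#else
static inline bool movable_node_is_enabled(void)
{
	/* Without CONFIG_MOVABLE_NODE the check below compiles away. */
	return false;
}
#endif
```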
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Tang Chen <tangchen@cn.fujitsu.com>
Signed-off-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: "Rafael J . Wysocki" <rjw@sisk.pl>
Cc: Chen Tang <imtangchen@gmail.com>
Cc: Gong Chen <gong.chen@linux.intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Len Brown <lenb@kernel.org>
Cc: Liu Jiang <jiang.liu@huawei.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Taku Izumi <izumi.taku@jp.fujitsu.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Renninger <trenn@suse.de>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: Vasilis Liaskovitis <vasilis.liaskovitis@profitbricks.com>
Cc: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Wen Congyang <wency@cn.fujitsu.com>
Cc: Yasuaki Ishimatsu <isimatu.yasuaki@jp.fujitsu.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memblock.c')
 mm/memblock.c | 12 ++++++++++++
 1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index d5681008dce1..6a2a48a122a9 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -39,6 +39,9 @@ struct memblock memblock __initdata_memblock = {
 };
 
 int memblock_debug __initdata_memblock;
+#ifdef CONFIG_MOVABLE_NODE
+bool movable_node_enabled __initdata_memblock = false;
+#endif
 static int memblock_can_resize __initdata_memblock;
 static int memblock_memory_in_slab __initdata_memblock = 0;
 static int memblock_reserved_in_slab __initdata_memblock = 0;
@@ -820,6 +823,11 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
  * @out_nid: ptr to int for nid of the range, can be %NULL
  *
  * Reverse of __next_free_mem_range().
+ *
+ * Linux kernel cannot migrate pages used by itself. Memory hotplug users won't
+ * be able to hot-remove hotpluggable memory used by the kernel. So this
+ * function skip hotpluggable regions if needed when allocating memory for the
+ * kernel.
  */
 void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
 					   phys_addr_t *out_start,
@@ -844,6 +852,10 @@ void __init_memblock __next_free_mem_range_rev(u64 *idx, int nid,
 		if (nid != MAX_NUMNODES && nid != memblock_get_region_node(m))
 			continue;
 
+		/* skip hotpluggable memory regions if needed */
+		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+			continue;
+
 		/* scan areas before each reservation for intersection */
 		for ( ; ri >= 0; ri--) {
 			struct memblock_region *r = &rsv->regions[ri];
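For completeness: movable_node_enabled starts out false and is flipped by the movable_node early boot parameter. That handler lives in mm/memory_hotplug.c and is outside this mm/memblock.c-limited view; a hedged sketch of what it looks like in this series:

```c
#include <linux/init.h>      /* early_param(), __init */
#include <linux/printk.h>    /* pr_warn() */
#include <linux/memblock.h>  /* movable_node_enabled (assumed declared there) */

/*
 * Hedged sketch of the movable_node command-line handler in
 * mm/memory_hotplug.c (same series, not shown above); the exact warning
 * text and error handling in the real kernel may differ.
 */
static int __init cmdline_parse_movable_node(char *p)
{
#ifdef CONFIG_MOVABLE_NODE
	movable_node_enabled = true;
#else
	pr_warn("movable_node option not supported\n");
#endif
	return 0;
}
early_param("movable_node", cmdline_parse_movable_node);
```

With that in place, booting with movable_node on the kernel command line makes the top-down iterator above pass over MEMBLOCK_HOTPLUG regions, so early allocations stay out of memory that is later meant to become ZONE_MOVABLE.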