author	Tony Luck <tony.luck@intel.com>	2015-06-24 19:58:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-06-24 20:49:44 -0400
commit	fc6daaf93151877748f8096af6b3fddb147f22d6 (patch)
tree	1892f34cca08d40af6598bccae87c42037c5ea80
parent	6afdb859b71019143b8eecda02b8b29b03185055 (diff)
mm/memblock: add extra "flags" to memblock to allow selection of memory based on attribute
Some high end Intel Xeon systems report uncorrectable memory errors as a
recoverable machine check.  Linux has included code for some time to
process these and just signal the affected processes (or even recover
completely if the error was in a read only page that can be replaced by
reading from disk).

But we have no recovery path for errors encountered during kernel code
execution.  Except for some very specific cases, we are unlikely to ever
be able to recover.

Enter memory mirroring.  Actually the 3rd generation of memory mirroring:

Gen1: All memory is mirrored
	Pro: No s/w enabling - h/w just gets good data from the other side
	     of the mirror
	Con: Halves effective memory capacity available to OS/applications

Gen2: Partial memory mirror - just mirror memory behind some memory
      controllers
	Pro: Keep more of the capacity
	Con: Nightmare to enable. Have to choose between allocating from
	     mirrored memory for safety vs. NUMA local memory for performance

Gen3: Address range partial memory mirror - some mirror on each memory
      controller
	Pro: Can tune the amount of mirror and keep NUMA performance
	Con: I have to write memory management code to implement

The current plan is just to use mirrored memory for kernel allocations.
This has been broken into two phases:

1) This patch series - find the mirrored memory, use it for boot time
   allocations

2) Wade into mm/page_alloc.c and define a ZONE_MIRROR to pick up the
   unused mirrored memory from mm/memblock.c and only give it out to
   select kernel allocations (this is still being scoped because
   page_alloc.c is scary).

This patch (of 3):

Add extra "flags" to memblock to allow selection of memory based on
attribute.  No functional changes.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Cc: Xishi Qiu <qiuxishi@huawei.com>
Cc: Hanjun Guo <guohanjun@huawei.com>
Cc: Xiexiuqi <xiexiuqi@huawei.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
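For readers tracking the interface change at call sites, the sketch below contrasts a caller before and after this patch. It is illustrative only (do_something() is a placeholder); passing MEMBLOCK_NONE preserves the old behaviour, and a later patch in the series is expected to add a mirror attribute that callers could pass instead:

	/* before: no way to restrict the walk by memory attribute */
	for_each_free_mem_range(i, nid, &start, &end, NULL)
		do_something(start, end);

	/* after: a flags word is threaded through; MEMBLOCK_NONE means
	 * "no special request", i.e. behaviour is unchanged */
	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL)
		do_something(start, end);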
-rw-r--r--	arch/s390/kernel/crash_dump.c	 5
-rw-r--r--	arch/sparc/mm/init_64.c	 6
-rw-r--r--	arch/x86/kernel/check.c	 3
-rw-r--r--	arch/x86/kernel/e820.c	 3
-rw-r--r--	arch/x86/mm/init_32.c	 2
-rw-r--r--	include/linux/memblock.h	41
-rw-r--r--	mm/cma.c	 6
-rw-r--r--	mm/memblock.c	55
-rw-r--r--	mm/memtest.c	 3
-rw-r--r--	mm/nobootmem.c	 6
10 files changed, 83 insertions(+), 47 deletions(-)
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index d9f0dcfcae5e..7a75ad4594e3 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -33,11 +33,12 @@ static struct memblock_type oldmem_type = {
 };
 
 #define for_each_dump_mem_range(i, nid, p_start, p_end, p_nid)		\
-	for (i = 0, __next_mem_range(&i, nid, &memblock.physmem,	\
+	for (i = 0, __next_mem_range(&i, nid, MEMBLOCK_NONE,		\
+				     &memblock.physmem,			\
 				     &oldmem_type, p_start,		\
 				     p_end, p_nid);			\
 	     i != (u64)ULLONG_MAX;					\
-	     __next_mem_range(&i, nid, &memblock.physmem,		\
+	     __next_mem_range(&i, nid, MEMBLOCK_NONE, &memblock.physmem,\
 			      &oldmem_type,				\
 			      p_start, p_end, p_nid))
 
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index c5d08b89a96c..4ac88b757514 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1966,7 +1966,8 @@ static phys_addr_t __init available_memory(void)
 	phys_addr_t pa_start, pa_end;
 	u64 i;
 
-	for_each_free_mem_range(i, NUMA_NO_NODE, &pa_start, &pa_end, NULL)
+	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
+				&pa_end, NULL)
 		available = available + (pa_end - pa_start);
 
 	return available;
@@ -1992,7 +1993,8 @@ static void __init reduce_memory(phys_addr_t limit_ram)
 	if (limit_ram >= avail_ram)
 		return;
 
-	for_each_free_mem_range(i, NUMA_NO_NODE, &pa_start, &pa_end, NULL) {
+	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
+				&pa_end, NULL) {
 		phys_addr_t region_size = pa_end - pa_start;
 		phys_addr_t clip_start = pa_start;
 
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 83a7995625a6..58118e207a69 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -91,7 +91,8 @@ void __init setup_bios_corruption_check(void)
 
 	corruption_check_size = round_up(corruption_check_size, PAGE_SIZE);
 
-	for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL) {
+	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
+				NULL) {
 		start = clamp_t(phys_addr_t, round_up(start, PAGE_SIZE),
 				PAGE_SIZE, corruption_check_size);
 		end = clamp_t(phys_addr_t, round_down(end, PAGE_SIZE),
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index e2ce85db2283..c8dda42cb6a3 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1123,7 +1123,8 @@ void __init memblock_find_dma_reserve(void)
 		nr_pages += end_pfn - start_pfn;
 	}
 
-	for_each_free_mem_range(u, NUMA_NO_NODE, &start, &end, NULL) {
+	for_each_free_mem_range(u, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
+				NULL) {
 		start_pfn = min_t(unsigned long, PFN_UP(start), MAX_DMA_PFN);
 		end_pfn = min_t(unsigned long, PFN_DOWN(end), MAX_DMA_PFN);
 		if (start_pfn < end_pfn)
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index c8140e12816a..8340e45c891a 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -433,7 +433,7 @@ void __init add_highpages_with_active_regions(int nid,
 	phys_addr_t start, end;
 	u64 i;
 
-	for_each_free_mem_range(i, nid, &start, &end, NULL) {
+	for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
 		unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
 					    start_pfn, end_pfn);
 		unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 9497ec7c77ea..7aeec0cb4c27 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -21,7 +21,10 @@
 #define INIT_PHYSMEM_REGIONS	4
 
 /* Definition of memblock flags. */
-#define MEMBLOCK_HOTPLUG	0x1	/* hotpluggable region */
+enum {
+	MEMBLOCK_NONE		= 0x0,	/* No special request */
+	MEMBLOCK_HOTPLUG	= 0x1,	/* hotpluggable region */
+};
 
 struct memblock_region {
 	phys_addr_t base;
@@ -61,7 +64,7 @@ extern bool movable_node_enabled;
 
 phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
 					phys_addr_t start, phys_addr_t end,
-					int nid);
+					int nid, ulong flags);
 phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
 				   phys_addr_t size, phys_addr_t align);
 phys_addr_t get_allocated_memblock_reserved_regions_info(phys_addr_t *addr);
@@ -85,11 +88,13 @@ int memblock_remove_range(struct memblock_type *type,
 			      phys_addr_t base,
 			      phys_addr_t size);
 
-void __next_mem_range(u64 *idx, int nid, struct memblock_type *type_a,
+void __next_mem_range(u64 *idx, int nid, ulong flags,
+		      struct memblock_type *type_a,
 		      struct memblock_type *type_b, phys_addr_t *out_start,
 		      phys_addr_t *out_end, int *out_nid);
 
-void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
+void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+			  struct memblock_type *type_a,
 			  struct memblock_type *type_b, phys_addr_t *out_start,
 			  phys_addr_t *out_end, int *out_nid);
 
@@ -100,16 +105,17 @@ void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
  * @type_a: ptr to memblock_type to iterate
  * @type_b: ptr to memblock_type which excludes from the iteration
  * @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @flags: pick from blocks based on memory attributes
  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @p_nid: ptr to int for nid of the range, can be %NULL
  */
-#define for_each_mem_range(i, type_a, type_b, nid,			\
+#define for_each_mem_range(i, type_a, type_b, nid, flags,		\
 			   p_start, p_end, p_nid)			\
-	for (i = 0, __next_mem_range(&i, nid, type_a, type_b,		\
+	for (i = 0, __next_mem_range(&i, nid, flags, type_a, type_b,	\
 				     p_start, p_end, p_nid);		\
 	     i != (u64)ULLONG_MAX;					\
-	     __next_mem_range(&i, nid, type_a, type_b,			\
+	     __next_mem_range(&i, nid, flags, type_a, type_b,		\
 			      p_start, p_end, p_nid))
 
 /**
@@ -119,17 +125,18 @@ void __next_mem_range_rev(u64 *idx, int nid, struct memblock_type *type_a,
  * @type_a: ptr to memblock_type to iterate
  * @type_b: ptr to memblock_type which excludes from the iteration
  * @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @flags: pick from blocks based on memory attributes
  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @p_nid: ptr to int for nid of the range, can be %NULL
  */
-#define for_each_mem_range_rev(i, type_a, type_b, nid,			\
+#define for_each_mem_range_rev(i, type_a, type_b, nid, flags,		\
 			       p_start, p_end, p_nid)			\
 	for (i = (u64)ULLONG_MAX,					\
-		     __next_mem_range_rev(&i, nid, type_a, type_b,	\
+		     __next_mem_range_rev(&i, nid, flags, type_a, type_b,\
 					  p_start, p_end, p_nid);	\
 	     i != (u64)ULLONG_MAX;					\
-	     __next_mem_range_rev(&i, nid, type_a, type_b,		\
+	     __next_mem_range_rev(&i, nid, flags, type_a, type_b,	\
 				  p_start, p_end, p_nid))
 
 #ifdef CONFIG_MOVABLE_NODE
@@ -181,13 +188,14 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @flags: pick from blocks based on memory attributes
  *
  * Walks over free (memory && !reserved) areas of memblock. Available as
  * soon as memblock is initialized.
  */
-#define for_each_free_mem_range(i, nid, p_start, p_end, p_nid)		\
+#define for_each_free_mem_range(i, nid, flags, p_start, p_end, p_nid)	\
 	for_each_mem_range(i, &memblock.memory, &memblock.reserved,	\
-			   nid, p_start, p_end, p_nid)
+			   nid, flags, p_start, p_end, p_nid)
 
 /**
  * for_each_free_mem_range_reverse - rev-iterate through free memblock areas
@@ -196,13 +204,15 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
  * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
  * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @p_nid: ptr to int for nid of the range, can be %NULL
+ * @flags: pick from blocks based on memory attributes
  *
  * Walks over free (memory && !reserved) areas of memblock in reverse
  * order. Available as soon as memblock is initialized.
  */
-#define for_each_free_mem_range_reverse(i, nid, p_start, p_end, p_nid)	\
+#define for_each_free_mem_range_reverse(i, nid, flags, p_start, p_end,	\
+					p_nid)				\
 	for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved,	\
-			       nid, p_start, p_end, p_nid)
+			       nid, flags, p_start, p_end, p_nid)
 
 static inline void memblock_set_region_flags(struct memblock_region *r,
 					     unsigned long flags)
@@ -273,7 +283,8 @@ static inline bool memblock_bottom_up(void) { return false; }
 #define MEMBLOCK_ALLOC_ACCESSIBLE	0
 
 phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
-					phys_addr_t start, phys_addr_t end);
+					phys_addr_t start, phys_addr_t end,
+					ulong flags);
 phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
 				phys_addr_t max_addr);
 phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
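As a usage sketch for the iterator updated above (modelled on the sparc available_memory() hunk earlier in this patch), this sums all free memory on any node with no attribute restriction:

	phys_addr_t start, end, total = 0;
	u64 i;

	/* NUMA_NO_NODE: any node; MEMBLOCK_NONE: any attribute */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&start, &end, NULL)
		total += end - start;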
diff --git a/mm/cma.c b/mm/cma.c
index 661278025c46..e7d1db533025 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -316,13 +316,15 @@ int __init cma_declare_contiguous(phys_addr_t base,
 	 */
 	if (base < highmem_start && limit > highmem_start) {
 		addr = memblock_alloc_range(size, alignment,
-					    highmem_start, limit);
+					    highmem_start, limit,
+					    MEMBLOCK_NONE);
 		limit = highmem_start;
 	}
 
 	if (!addr) {
 		addr = memblock_alloc_range(size, alignment, base,
-					    limit);
+					    limit,
+					    MEMBLOCK_NONE);
 		if (!addr) {
 			ret = -ENOMEM;
 			goto err;
diff --git a/mm/memblock.c b/mm/memblock.c
index 9318b567ed79..b9ff2f4f0285 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -107,6 +107,7 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
  * @size: size of free area to find
  * @align: alignment of free area to find
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ * @flags: pick from blocks based on memory attributes
  *
  * Utility called from memblock_find_in_range_node(), find free area bottom-up.
  *
@@ -115,12 +116,13 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
  */
 static phys_addr_t __init_memblock
 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
-				phys_addr_t size, phys_addr_t align, int nid)
+				phys_addr_t size, phys_addr_t align, int nid,
+				ulong flags)
 {
 	phys_addr_t this_start, this_end, cand;
 	u64 i;
 
-	for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
+	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
 		this_start = clamp(this_start, start, end);
 		this_end = clamp(this_end, start, end);
 
@@ -139,6 +141,7 @@ __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
  * @size: size of free area to find
  * @align: alignment of free area to find
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ * @flags: pick from blocks based on memory attributes
  *
  * Utility called from memblock_find_in_range_node(), find free area top-down.
  *
@@ -147,12 +150,14 @@ __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
  */
 static phys_addr_t __init_memblock
 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
-			       phys_addr_t size, phys_addr_t align, int nid)
+			       phys_addr_t size, phys_addr_t align, int nid,
+			       ulong flags)
 {
 	phys_addr_t this_start, this_end, cand;
 	u64 i;
 
-	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
+	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
+					NULL) {
 		this_start = clamp(this_start, start, end);
 		this_end = clamp(this_end, start, end);
 
@@ -174,6 +179,7 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
  * @start: start of candidate range
  * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
+ * @flags: pick from blocks based on memory attributes
  *
  * Find @size free area aligned to @align in the specified range and node.
  *
@@ -190,7 +196,7 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
  */
 phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 					phys_addr_t align, phys_addr_t start,
-					phys_addr_t end, int nid)
+					phys_addr_t end, int nid, ulong flags)
 {
 	phys_addr_t kernel_end, ret;
 
@@ -215,7 +221,7 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 
 		/* ok, try bottom-up allocation first */
 		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
-						      size, align, nid);
+						      size, align, nid, flags);
 		if (ret)
 			return ret;
 
@@ -233,7 +239,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
233 "memory hotunplug may be affected\n"); 239 "memory hotunplug may be affected\n");
234 } 240 }
235 241
236 return __memblock_find_range_top_down(start, end, size, align, nid); 242 return __memblock_find_range_top_down(start, end, size, align, nid,
243 flags);
237} 244}
238 245
239/** 246/**
@@ -253,7 +260,7 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
 					phys_addr_t align)
 {
 	return memblock_find_in_range_node(size, align, start, end,
-					   NUMA_NO_NODE);
+					   NUMA_NO_NODE, MEMBLOCK_NONE);
 }
 
 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
@@ -782,6 +789,7 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
  * __next__mem_range - next function for for_each_free_mem_range() etc.
  * @idx: pointer to u64 loop variable
  * @nid: node selector, %NUMA_NO_NODE for all nodes
+ * @flags: pick from blocks based on memory attributes
  * @type_a: pointer to memblock_type from where the range is taken
  * @type_b: pointer to memblock_type which excludes memory from being taken
  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
@@ -803,7 +811,7 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
  * As both region arrays are sorted, the function advances the two indices
  * in lockstep and returns each intersection.
  */
-void __init_memblock __next_mem_range(u64 *idx, int nid,
+void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
 				      struct memblock_type *type_a,
 				      struct memblock_type *type_b,
 				      phys_addr_t *out_start,
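The lockstep walk described in the comment above can be exercised outside the kernel. Below is a self-contained userspace sketch (plain arrays instead of struct memblock_type; illustrative only) that reports the same free (memory && !reserved) intersections. Because both indices only ever move forward, each region is visited at most once, which is what keeps the real iterator linear in the number of regions.

	#include <stdio.h>

	struct range { unsigned long start, end; };	/* [start, end) */

	int main(void)
	{
		/* both arrays sorted by address, as memblock guarantees */
		struct range memory[]   = { { 0x1000, 0x8000 }, { 0xa000, 0xc000 } };
		struct range reserved[] = { { 0x2000, 0x3000 }, { 0xb000, 0xb800 } };
		unsigned int nmem = 2, nres = 2, a = 0, b = 0;

		while (a < nmem && b <= nres) {
			/* gap b in "reserved": before region 0, between two
			 * consecutive regions, or after the last one */
			unsigned long g_start = b ? reserved[b - 1].end : 0;
			unsigned long g_end = b < nres ? reserved[b].start : ~0UL;
			unsigned long lo = memory[a].start > g_start ?
					   memory[a].start : g_start;
			unsigned long hi = memory[a].end < g_end ?
					   memory[a].end : g_end;

			if (lo < hi)	/* intersection of region a and gap b */
				printf("free: [%#lx-%#lx)\n", lo, hi);

			/* lockstep: advance whichever range ends first */
			if (memory[a].end <= g_end)
				a++;
			else
				b++;
		}
		return 0;
	}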
@@ -895,6 +903,7 @@ void __init_memblock __next_mem_range(u64 *idx, int nid,
  *
  * @idx: pointer to u64 loop variable
  * @nid: nid: node selector, %NUMA_NO_NODE for all nodes
+ * @flags: pick from blocks based on memory attributes
  * @type_a: pointer to memblock_type from where the range is taken
  * @type_b: pointer to memblock_type which excludes memory from being taken
  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
@@ -903,7 +912,7 @@ void __init_memblock __next_mem_range(u64 *idx, int nid,
  *
  * Reverse of __next_mem_range().
  */
-void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
+void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
 					  struct memblock_type *type_a,
 					  struct memblock_type *type_b,
 					  phys_addr_t *out_start,
@@ -1050,14 +1059,15 @@ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
 
 static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
 					phys_addr_t align, phys_addr_t start,
-					phys_addr_t end, int nid)
+					phys_addr_t end, int nid, ulong flags)
 {
 	phys_addr_t found;
 
 	if (!align)
 		align = SMP_CACHE_BYTES;
 
-	found = memblock_find_in_range_node(size, align, start, end, nid);
+	found = memblock_find_in_range_node(size, align, start, end, nid,
+					    flags);
 	if (found && !memblock_reserve(found, size)) {
 		/*
 		 * The min_count is set to 0 so that memblock allocations are
@@ -1070,26 +1080,30 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
 }
 
 phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
-					phys_addr_t start, phys_addr_t end)
+					phys_addr_t start, phys_addr_t end,
+					ulong flags)
 {
-	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE);
+	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
+					flags);
 }
 
 static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
 					phys_addr_t align, phys_addr_t max_addr,
-					int nid)
+					int nid, ulong flags)
 {
-	return memblock_alloc_range_nid(size, align, 0, max_addr, nid);
+	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
 }
 
 phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
 {
-	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
+				       nid, MEMBLOCK_NONE);
 }
 
 phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
 {
-	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE);
+	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
+				       MEMBLOCK_NONE);
 }
 
 phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
@@ -1173,13 +1187,14 @@ static void * __init memblock_virt_alloc_internal(
 
 again:
 	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
-					    nid);
+					    nid, MEMBLOCK_NONE);
 	if (alloc)
 		goto done;
 
 	if (nid != NUMA_NO_NODE) {
 		alloc = memblock_find_in_range_node(size, align, min_addr,
-						    max_addr, NUMA_NO_NODE);
+						    max_addr, NUMA_NO_NODE,
+						    MEMBLOCK_NONE);
 		if (alloc)
 			goto done;
 	}
diff --git a/mm/memtest.c b/mm/memtest.c
index 1997d934b13b..0a1cc133f6d7 100644
--- a/mm/memtest.c
+++ b/mm/memtest.c
@@ -74,7 +74,8 @@ static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end)
 	u64 i;
 	phys_addr_t this_start, this_end;
 
-	for_each_free_mem_range(i, NUMA_NO_NODE, &this_start, &this_end, NULL) {
+	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &this_start,
+				&this_end, NULL) {
 		this_start = clamp(this_start, start, end);
 		this_end = clamp(this_end, start, end);
 		if (this_start < this_end) {
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 90b50468333e..ad3641dcdbe7 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -41,7 +41,8 @@ static void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
 	if (limit > memblock.current_limit)
 		limit = memblock.current_limit;
 
-	addr = memblock_find_in_range_node(size, align, goal, limit, nid);
+	addr = memblock_find_in_range_node(size, align, goal, limit, nid,
+					   MEMBLOCK_NONE);
 	if (!addr)
 		return NULL;
 
@@ -121,7 +122,8 @@ static unsigned long __init free_low_memory_core_early(void)
 
 	memblock_clear_hotplug(0, -1);
 
-	for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL)
+	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
+				NULL)
 		count += __free_memory_core(start, end);
 
 #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK