summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorMike Rapoport <rppt@linux.vnet.ibm.com>2018-10-30 18:09:44 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-10-31 11:54:16 -0400
commit97ad1087efffed26cb00e310a927f9603332dfcb (patch)
tree4a3a2e83fe8bce2f35abb69fe82e51246804ffb2
parentbda49a81164ab3e62f5348447345711586fd42e9 (diff)
memblock: replace BOOTMEM_ALLOC_* with MEMBLOCK variants
Drop BOOTMEM_ALLOC_ACCESSIBLE and BOOTMEM_ALLOC_ANYWHERE in favor of identical MEMBLOCK definitions. Link: http://lkml.kernel.org/r/1536927045-23536-29-git-send-email-rppt@linux.vnet.ibm.com Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com> Acked-by: Michal Hocko <mhocko@suse.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chris Zankel <chris@zankel.net> Cc: "David S. Miller" <davem@davemloft.net> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Greentime Hu <green.hu@gmail.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Guan Xuetao <gxt@pku.edu.cn> Cc: Ingo Molnar <mingo@redhat.com> Cc: "James E.J. Bottomley" <jejb@parisc-linux.org> Cc: Jonas Bonn <jonas@southpole.se> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Ley Foon Tan <lftan@altera.com> Cc: Mark Salter <msalter@redhat.com> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Matt Turner <mattst88@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Simek <monstr@monstr.eu> Cc: Palmer Dabbelt <palmer@sifive.com> Cc: Paul Burton <paul.burton@mips.com> Cc: Richard Kuo <rkuo@codeaurora.org> Cc: Richard Weinberger <richard@nod.at> Cc: Rich Felker <dalias@libc.org> Cc: Russell King <linux@armlinux.org.uk> Cc: Serge Semin <fancer.lancer@gmail.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@intel.com> Cc: Vineet Gupta <vgupta@synopsys.com> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--arch/ia64/mm/discontig.c2
-rw-r--r--arch/powerpc/kernel/setup_64.c2
-rw-r--r--arch/sparc/kernel/smp_64.c2
-rw-r--r--arch/x86/kernel/setup_percpu.c2
-rw-r--r--arch/x86/mm/kasan_init_64.c4
-rw-r--r--mm/hugetlb.c3
-rw-r--r--mm/kasan/kasan_init.c2
-rw-r--r--mm/memblock.c8
-rw-r--r--mm/page_ext.c2
-rw-r--r--mm/sparse-vmemmap.c3
-rw-r--r--mm/sparse.c5
11 files changed, 19 insertions, 16 deletions
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index 918dda972975..70609f823960 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -453,7 +453,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
453 453
454 ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE, 454 ptr = memblock_alloc_try_nid(pernodesize, PERCPU_PAGE_SIZE,
455 __pa(MAX_DMA_ADDRESS), 455 __pa(MAX_DMA_ADDRESS),
456 BOOTMEM_ALLOC_ACCESSIBLE, 456 MEMBLOCK_ALLOC_ACCESSIBLE,
457 bestnode); 457 bestnode);
458 458
459 return ptr; 459 return ptr;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index f90ab3ea9af3..9216c3a7fcfc 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -764,7 +764,7 @@ void __init emergency_stack_init(void)
764static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) 764static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
765{ 765{
766 return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS), 766 return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
767 BOOTMEM_ALLOC_ACCESSIBLE, 767 MEMBLOCK_ALLOC_ACCESSIBLE,
768 early_cpu_to_node(cpu)); 768 early_cpu_to_node(cpu));
769 769
770} 770}
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index a087a6a25f06..6cc80d0f4b9f 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1595,7 +1595,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
1595 cpu, size, __pa(ptr)); 1595 cpu, size, __pa(ptr));
1596 } else { 1596 } else {
1597 ptr = memblock_alloc_try_nid(size, align, goal, 1597 ptr = memblock_alloc_try_nid(size, align, goal,
1598 BOOTMEM_ALLOC_ACCESSIBLE, node); 1598 MEMBLOCK_ALLOC_ACCESSIBLE, node);
1599 pr_debug("per cpu data for cpu%d %lu bytes on node%d at " 1599 pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
1600 "%016lx\n", cpu, size, node, __pa(ptr)); 1600 "%016lx\n", cpu, size, node, __pa(ptr));
1601 } 1601 }
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index a006f1ba4c39..483412fb8a24 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -114,7 +114,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
114 cpu, size, __pa(ptr)); 114 cpu, size, __pa(ptr));
115 } else { 115 } else {
116 ptr = memblock_alloc_try_nid_nopanic(size, align, goal, 116 ptr = memblock_alloc_try_nid_nopanic(size, align, goal,
117 BOOTMEM_ALLOC_ACCESSIBLE, 117 MEMBLOCK_ALLOC_ACCESSIBLE,
118 node); 118 node);
119 119
120 pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n", 120 pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 77b857cb036f..8f87499124b8 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -29,10 +29,10 @@ static __init void *early_alloc(size_t size, int nid, bool panic)
29{ 29{
30 if (panic) 30 if (panic)
31 return memblock_alloc_try_nid(size, size, 31 return memblock_alloc_try_nid(size, size,
32 __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); 32 __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
33 else 33 else
34 return memblock_alloc_try_nid_nopanic(size, size, 34 return memblock_alloc_try_nid_nopanic(size, size,
35 __pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid); 35 __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
36} 36}
37 37
38static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr, 38static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 51e9f17dbd5c..e35d99844612 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -16,6 +16,7 @@
16#include <linux/cpuset.h> 16#include <linux/cpuset.h>
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/bootmem.h> 18#include <linux/bootmem.h>
19#include <linux/memblock.h>
19#include <linux/sysfs.h> 20#include <linux/sysfs.h>
20#include <linux/slab.h> 21#include <linux/slab.h>
21#include <linux/mmdebug.h> 22#include <linux/mmdebug.h>
@@ -2102,7 +2103,7 @@ int __alloc_bootmem_huge_page(struct hstate *h)
2102 2103
2103 addr = memblock_alloc_try_nid_raw( 2104 addr = memblock_alloc_try_nid_raw(
2104 huge_page_size(h), huge_page_size(h), 2105 huge_page_size(h), huge_page_size(h),
2105 0, BOOTMEM_ALLOC_ACCESSIBLE, node); 2106 0, MEMBLOCK_ALLOC_ACCESSIBLE, node);
2106 if (addr) { 2107 if (addr) {
2107 /* 2108 /*
2108 * Use the beginning of the huge page to store the 2109 * Use the beginning of the huge page to store the
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
index 24d734bdff6b..785a9707786b 100644
--- a/mm/kasan/kasan_init.c
+++ b/mm/kasan/kasan_init.c
@@ -84,7 +84,7 @@ static inline bool kasan_zero_page_entry(pte_t pte)
84static __init void *early_alloc(size_t size, int node) 84static __init void *early_alloc(size_t size, int node)
85{ 85{
86 return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS), 86 return memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
87 BOOTMEM_ALLOC_ACCESSIBLE, node); 87 MEMBLOCK_ALLOC_ACCESSIBLE, node);
88} 88}
89 89
90static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr, 90static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
diff --git a/mm/memblock.c b/mm/memblock.c
index 3dd9cfef996c..2ed73245b5da 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1342,7 +1342,7 @@ phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t ali
1342 * hold the requested memory. 1342 * hold the requested memory.
1343 * 1343 *
1344 * The allocation is performed from memory region limited by 1344 * The allocation is performed from memory region limited by
1345 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE. 1345 * memblock.current_limit if @max_addr == %MEMBLOCK_ALLOC_ACCESSIBLE.
1346 * 1346 *
1347 * The memory block is aligned on %SMP_CACHE_BYTES if @align == 0. 1347 * The memory block is aligned on %SMP_CACHE_BYTES if @align == 0.
1348 * 1348 *
@@ -1429,7 +1429,7 @@ done:
1429 * @min_addr: the lower bound of the memory region from where the allocation 1429 * @min_addr: the lower bound of the memory region from where the allocation
1430 * is preferred (phys address) 1430 * is preferred (phys address)
1431 * @max_addr: the upper bound of the memory region from where the allocation 1431 * @max_addr: the upper bound of the memory region from where the allocation
1432 * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to 1432 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1433 * allocate only from memory limited by memblock.current_limit value 1433 * allocate only from memory limited by memblock.current_limit value
1434 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1434 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1435 * 1435 *
@@ -1466,7 +1466,7 @@ void * __init memblock_alloc_try_nid_raw(
1466 * @min_addr: the lower bound of the memory region from where the allocation 1466 * @min_addr: the lower bound of the memory region from where the allocation
1467 * is preferred (phys address) 1467 * is preferred (phys address)
1468 * @max_addr: the upper bound of the memory region from where the allocation 1468 * @max_addr: the upper bound of the memory region from where the allocation
1469 * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to 1469 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1470 * allocate only from memory limited by memblock.current_limit value 1470 * allocate only from memory limited by memblock.current_limit value
1471 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1471 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1472 * 1472 *
@@ -1501,7 +1501,7 @@ void * __init memblock_alloc_try_nid_nopanic(
1501 * @min_addr: the lower bound of the memory region from where the allocation 1501 * @min_addr: the lower bound of the memory region from where the allocation
1502 * is preferred (phys address) 1502 * is preferred (phys address)
1503 * @max_addr: the upper bound of the memory region from where the allocation 1503 * @max_addr: the upper bound of the memory region from where the allocation
1504 * is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to 1504 * is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
1505 * allocate only from memory limited by memblock.current_limit value 1505 * allocate only from memory limited by memblock.current_limit value
1506 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node 1506 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
1507 * 1507 *
diff --git a/mm/page_ext.c b/mm/page_ext.c
index e77c0f031dd0..5323c2ade686 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -163,7 +163,7 @@ static int __init alloc_node_page_ext(int nid)
163 163
164 base = memblock_alloc_try_nid_nopanic( 164 base = memblock_alloc_try_nid_nopanic(
165 table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), 165 table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
166 BOOTMEM_ALLOC_ACCESSIBLE, nid); 166 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
167 if (!base) 167 if (!base)
168 return -ENOMEM; 168 return -ENOMEM;
169 NODE_DATA(nid)->node_page_ext = base; 169 NODE_DATA(nid)->node_page_ext = base;
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 91c2c3d25827..7408cabed61a 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -21,6 +21,7 @@
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/mmzone.h> 22#include <linux/mmzone.h>
23#include <linux/bootmem.h> 23#include <linux/bootmem.h>
24#include <linux/memblock.h>
24#include <linux/memremap.h> 25#include <linux/memremap.h>
25#include <linux/highmem.h> 26#include <linux/highmem.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
@@ -43,7 +44,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node,
43 unsigned long goal) 44 unsigned long goal)
44{ 45{
45 return memblock_alloc_try_nid_raw(size, align, goal, 46 return memblock_alloc_try_nid_raw(size, align, goal,
46 BOOTMEM_ALLOC_ACCESSIBLE, node); 47 MEMBLOCK_ALLOC_ACCESSIBLE, node);
47} 48}
48 49
49void * __meminit vmemmap_alloc_block(unsigned long size, int node) 50void * __meminit vmemmap_alloc_block(unsigned long size, int node)
diff --git a/mm/sparse.c b/mm/sparse.c
index d1296610562b..b139fbc61d10 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -6,6 +6,7 @@
6#include <linux/slab.h> 6#include <linux/slab.h>
7#include <linux/mmzone.h> 7#include <linux/mmzone.h>
8#include <linux/bootmem.h> 8#include <linux/bootmem.h>
9#include <linux/memblock.h>
9#include <linux/compiler.h> 10#include <linux/compiler.h>
10#include <linux/highmem.h> 11#include <linux/highmem.h>
11#include <linux/export.h> 12#include <linux/export.h>
@@ -393,7 +394,7 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid,
393 394
394 map = memblock_alloc_try_nid(size, 395 map = memblock_alloc_try_nid(size,
395 PAGE_SIZE, __pa(MAX_DMA_ADDRESS), 396 PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
396 BOOTMEM_ALLOC_ACCESSIBLE, nid); 397 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
397 return map; 398 return map;
398} 399}
399#endif /* !CONFIG_SPARSEMEM_VMEMMAP */ 400#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
@@ -407,7 +408,7 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
407 sparsemap_buf = 408 sparsemap_buf =
408 memblock_alloc_try_nid_raw(size, PAGE_SIZE, 409 memblock_alloc_try_nid_raw(size, PAGE_SIZE,
409 __pa(MAX_DMA_ADDRESS), 410 __pa(MAX_DMA_ADDRESS),
410 BOOTMEM_ALLOC_ACCESSIBLE, nid); 411 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
411 sparsemap_buf_end = sparsemap_buf + size; 412 sparsemap_buf_end = sparsemap_buf + size;
412} 413}
413 414