author		Mike Rapoport <rppt@linux.vnet.ibm.com>		2018-10-30 18:08:58 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-10-31 11:54:15 -0400
commit		15c3c114ed144e5d9ad0f9e8f9f2998bae372190 (patch)
tree		cff75804b9f7729c36f5d9fc5ab5e552f7a24cd0
parent		e8625dce71b4c23eb81bc9b023c7628807df89e8 (diff)
memblock: replace alloc_bootmem_pages with memblock_alloc
The alloc_bootmem_pages() function allocates PAGE_SIZE aligned memory.
memblock_alloc() with alignment set to PAGE_SIZE does exactly the same
thing.

The conversion is done using the following semantic patch:

@@
expression e;
@@
- alloc_bootmem_pages(e)
+ memblock_alloc(e, PAGE_SIZE)

Link: http://lkml.kernel.org/r/1536927045-23536-20-git-send-email-rppt@linux.vnet.ibm.com
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Ley Foon Tan <lftan@altera.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Palmer Dabbelt <palmer@sifive.com>
Cc: Paul Burton <paul.burton@mips.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Serge Semin <fancer.lancer@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
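For reference, the equivalence the semantic patch relies on can be restated as a thin C sketch. The wrapper name below is hypothetical and is not part of this patch or of the kernel; it only illustrates that a PAGE_SIZE-aligned boot-time allocation via the old bootmem call maps directly onto memblock_alloc() with the alignment argument set to PAGE_SIZE:

/*
 * Illustrative sketch only (hypothetical helper, not added by this patch):
 * alloc_bootmem_pages(size) returned PAGE_SIZE-aligned boot memory, so the
 * memblock equivalent used throughout this series is
 * memblock_alloc(size, PAGE_SIZE).
 */
#include <linux/memblock.h>

static inline void *alloc_bootmem_pages_compat(unsigned long size)
{
	return memblock_alloc(size, PAGE_SIZE);
}

Because the two calls are interchangeable in this way, every hunk below is a mechanical one-for-one substitution, split across two lines where the longer call would otherwise exceed the usual line length.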
-rw-r--r--	arch/c6x/mm/init.c		3
-rw-r--r--	arch/h8300/mm/init.c		2
-rw-r--r--	arch/m68k/mm/init.c		2
-rw-r--r--	arch/m68k/mm/mcfmmu.c		4
-rw-r--r--	arch/m68k/mm/motorola.c		2
-rw-r--r--	arch/m68k/mm/sun3mmu.c		4
-rw-r--r--	arch/sh/mm/init.c		4
-rw-r--r--	arch/x86/kernel/apic/io_apic.c	3
-rw-r--r--	arch/x86/mm/init_64.c		2
-rw-r--r--	drivers/xen/swiotlb-xen.c	3
10 files changed, 16 insertions, 13 deletions
diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c
index 4cc72b0d1c1d..dc369ad8b0ba 100644
--- a/arch/c6x/mm/init.c
+++ b/arch/c6x/mm/init.c
@@ -38,7 +38,8 @@ void __init paging_init(void)
 	struct pglist_data *pgdat = NODE_DATA(0);
 	unsigned long zones_size[MAX_NR_ZONES] = {0, };
 
-	empty_zero_page = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
+	empty_zero_page = (unsigned long) memblock_alloc(PAGE_SIZE,
+							 PAGE_SIZE);
 	memset((void *)empty_zero_page, 0, PAGE_SIZE);
 
 	/*
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 015287ac8ce8..5d31ac9d7a8d 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -67,7 +67,7 @@ void __init paging_init(void)
 	 * Initialize the bad page table and bad page to point
 	 * to a couple of allocated pages.
 	 */
-	empty_zero_page = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+	empty_zero_page = (unsigned long)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 	memset((void *)empty_zero_page, 0, PAGE_SIZE);
 
 	/*
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index 38e2b272c220..977363eda125 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -93,7 +93,7 @@ void __init paging_init(void)
 
 	high_memory = (void *) end_mem;
 
-	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 
 	/*
 	 * Set up SFC/DFC registers (user data space).
diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c
index f5453d944ff5..38a1d92dd555 100644
--- a/arch/m68k/mm/mcfmmu.c
+++ b/arch/m68k/mm/mcfmmu.c
@@ -44,7 +44,7 @@ void __init paging_init(void)
 	enum zone_type zone;
 	int i;
 
-	empty_zero_page = (void *) alloc_bootmem_pages(PAGE_SIZE);
+	empty_zero_page = (void *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 	memset((void *) empty_zero_page, 0, PAGE_SIZE);
 
 	pg_dir = swapper_pg_dir;
@@ -52,7 +52,7 @@ void __init paging_init(void)
 
 	size = num_pages * sizeof(pte_t);
 	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
-	next_pgtable = (unsigned long) alloc_bootmem_pages(size);
+	next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
 
 	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
 	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index 8bcf57ec5127..2113eec8dbf9 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -276,7 +276,7 @@ void __init paging_init(void)
 	 * initialize the bad page table and bad page to point
 	 * to a couple of allocated pages
 	 */
-	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 
 	/*
 	 * Set up SFC/DFC registers
diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c
index 4a9979908357..19c05ab9824d 100644
--- a/arch/m68k/mm/sun3mmu.c
+++ b/arch/m68k/mm/sun3mmu.c
@@ -45,7 +45,7 @@ void __init paging_init(void)
 	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
 	unsigned long size;
 
-	empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
+	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 
 	address = PAGE_OFFSET;
 	pg_dir = swapper_pg_dir;
@@ -55,7 +55,7 @@ void __init paging_init(void)
 	size = num_pages * sizeof(pte_t);
 	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
 
-	next_pgtable = (unsigned long)alloc_bootmem_pages(size);
+	next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE);
 	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
 
 	/* Map whole memory from PAGE_OFFSET (0x0E000000) */
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 7713c084d040..c884b760e52f 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -128,7 +128,7 @@ static pmd_t * __init one_md_table_init(pud_t *pud)
 	if (pud_none(*pud)) {
 		pmd_t *pmd;
 
-		pmd = alloc_bootmem_pages(PAGE_SIZE);
+		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 		pud_populate(&init_mm, pud, pmd);
 		BUG_ON(pmd != pmd_offset(pud, 0));
 	}
@@ -141,7 +141,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	if (pmd_none(*pmd)) {
 		pte_t *pte;
 
-		pte = alloc_bootmem_pages(PAGE_SIZE);
+		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 		pmd_populate_kernel(&init_mm, pmd, pte);
 		BUG_ON(pte != pte_offset_kernel(pmd, 0));
 	}
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ff0d14cd9e82..e25118f8a726 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2621,7 +2621,8 @@ void __init io_apic_init_mappings(void)
 #ifdef CONFIG_X86_32
 fake_ioapic_page:
 #endif
-			ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
+			ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE,
+								    PAGE_SIZE);
 			ioapic_phys = __pa(ioapic_phys);
 		}
 		set_fixmap_nocache(idx, ioapic_phys);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index dd519f372169..f39b51244fe2 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -197,7 +197,7 @@ static __ref void *spp_getpage(void)
 	if (after_bootmem)
 		ptr = (void *) get_zeroed_page(GFP_ATOMIC);
 	else
-		ptr = alloc_bootmem_pages(PAGE_SIZE);
+		ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 
 	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
 		panic("set_pte_phys: cannot allocate page data %s\n",
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index f5c1af4ce9ab..91a6208ec1a5 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -217,7 +217,8 @@ retry:
 	 * Get IO TLB memory from any location.
 	 */
 	if (early)
-		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
+		xen_io_tlb_start = memblock_alloc(PAGE_ALIGN(bytes),
+						  PAGE_SIZE);
 	else {
 #define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
 #define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)