author		Mike Rapoport <rppt@linux.ibm.com>	2019-03-07 19:30:48 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-07 21:32:03 -0500
commit		f806714f7048715cc18f16ebe26a761e09b2f210 (patch)
tree		ec51f7bd9b080843b5dbb3f4fe5a1c5173d9336c
parent		45ec975efb527625629d123f30597673889f52ca (diff)
powerpc: prefer memblock APIs returning virtual address
Patch series "memblock: simplify several early memory allocation", v4. These patches simplify some of the early memory allocations by replacing usage of older memblock APIs with newer and shinier ones. Quite a few places in the arch/ code allocated memory using a memblock API that returns a physical address of the allocated area, then converted this physical address to a virtual one and then used memset(0) to clear the allocated range. More recent memblock APIs do all the three steps in one call and their usage simplifies the code. It's important to note that regardless of API used, the core allocation is nearly identical for any set of memblock allocators: first it tries to find a free memory with all the constraints specified by the caller and then falls back to the allocation with some or all constraints disabled. The first three patches perform the conversion of call sites that have exact requirements for the node and the possible memory range. The fourth patch is a bit one-off as it simplifies openrisc's implementation of pte_alloc_one_kernel(), and not only the memblock usage. The fifth patch takes care of simpler cases when the allocation can be satisfied with a simple call to memblock_alloc(). The sixth patch removes one-liner wrappers for memblock_alloc on arm and unicore32, as suggested by Christoph. This patch (of 6): There are a several places that allocate memory using memblock APIs that return a physical address, convert the returned address to the virtual address and frequently also memset(0) the allocated range. Update these places to use memblock allocators already returning a virtual address. Use memblock functions that clear the allocated memory instead of calling memset(0) where appropriate. The calls to memblock_alloc_base() that were not followed by memset(0) are replaced with memblock_alloc_try_nid_raw(). Since the latter does not panic() when the allocation fails, the appropriate panic() calls are added to the call sites. Link: http://lkml.kernel.org/r/1546248566-14910-2-git-send-email-rppt@linux.ibm.com Signed-off-by: Mike Rapoport <rppt@linux.ibm.com> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: "David S. Miller" <davem@davemloft.net> Cc: Guan Xuetao <gxt@pku.edu.cn> Cc: Greentime Hu <green.hu@gmail.com> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Jonas Bonn <jonas@southpole.se> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Hocko <mhocko@suse.com> Cc: Michal Simek <monstr@monstr.eu> Cc: Mark Salter <msalter@redhat.com> Cc: Paul Mackerras <paulus@samba.org> Cc: Rich Felker <dalias@libc.org> Cc: Russell King <linux@armlinux.org.uk> Cc: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi> Cc: Stafford Horne <shorne@gmail.com> Cc: Vincent Chen <deanbo422@gmail.com> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Cc: Christoph Hellwig <hch@infradead.org> Cc: Michal Simek <michal.simek@xilinx.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	arch/powerpc/kernel/paca.c	16
-rw-r--r--	arch/powerpc/kernel/setup_64.c	5
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c	6
-rw-r--r--	arch/powerpc/mm/pgtable-book3e.c	8
-rw-r--r--	arch/powerpc/mm/pgtable-book3s64.c	5
-rw-r--r--	arch/powerpc/mm/pgtable-radix.c	25
-rw-r--r--	arch/powerpc/platforms/pasemi/iommu.c	5
-rw-r--r--	arch/powerpc/platforms/pseries/setup.c	18
-rw-r--r--	arch/powerpc/sysdev/dart_iommu.c	7
9 files changed, 44 insertions, 51 deletions
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index b8480127793d..8c890c6557ed 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -28,7 +28,7 @@
 static void *__init alloc_paca_data(unsigned long size, unsigned long align,
				     unsigned long limit, int cpu)
 {
-	unsigned long pa;
+	void *ptr;
 	int nid;
 
 	/*
@@ -43,17 +43,15 @@ static void *__init alloc_paca_data(unsigned long size, unsigned long align,
 		nid = early_cpu_to_node(cpu);
 	}
 
-	pa = memblock_alloc_base_nid(size, align, limit, nid, MEMBLOCK_NONE);
-	if (!pa) {
-		pa = memblock_alloc_base(size, align, limit);
-		if (!pa)
-			panic("cannot allocate paca data");
-	}
+	ptr = memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT,
+				     limit, nid);
+	if (!ptr)
+		panic("cannot allocate paca data");
 
 	if (cpu == boot_cpuid)
 		memblock_set_bottom_up(false);
 
-	return __va(pa);
+	return ptr;
 }
 
 #ifdef CONFIG_PPC_PSERIES
@@ -119,7 +117,6 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
 	}
 
 	s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);
-	memset(s, 0, sizeof(*s));
 
 	s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
 	s->buffer_length = cpu_to_be32(sizeof(*s));
@@ -223,7 +220,6 @@ void __init allocate_paca(int cpu)
 	paca = alloc_paca_data(sizeof(struct paca_struct), L1_CACHE_BYTES,
 				limit, cpu);
 	paca_ptrs[cpu] = paca;
-	memset(paca, 0, sizeof(struct paca_struct));
 
 	initialise_paca(paca, cpu);
 #ifdef CONFIG_PPC_PSERIES
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 236c1151a3a7..5de413ae3cd6 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -933,8 +933,9 @@ static void __ref init_fallback_flush(void)
 	 * hardware prefetch runoff. We don't have a recipe for load patterns to
 	 * reliably avoid the prefetcher.
 	 */
-	l1d_flush_fallback_area = __va(memblock_alloc_base(l1d_size * 2, l1d_size, limit));
-	memset(l1d_flush_fallback_area, 0, l1d_size * 2);
+	l1d_flush_fallback_area = memblock_alloc_try_nid(l1d_size * 2,
+						l1d_size, MEMBLOCK_LOW_LIMIT,
+						limit, NUMA_NO_NODE);
 
 	for_each_possible_cpu(cpu) {
 		struct paca_struct *paca = paca_ptrs[cpu];
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 0cc7fbc3bd1c..bc6be44913d4 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -908,9 +908,9 @@ static void __init htab_initialize(void)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	if (debug_pagealloc_enabled()) {
 		linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
-		linear_map_hash_slots = __va(memblock_alloc_base(
-				linear_map_hash_count, 1, ppc64_rma_size));
-		memset(linear_map_hash_slots, 0, linear_map_hash_count);
+		linear_map_hash_slots = memblock_alloc_try_nid(
+				linear_map_hash_count, 1, MEMBLOCK_LOW_LIMIT,
+				ppc64_rma_size, NUMA_NO_NODE);
 	}
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
diff --git a/arch/powerpc/mm/pgtable-book3e.c b/arch/powerpc/mm/pgtable-book3e.c
index e0ccf36714b2..53cbc7dc2df2 100644
--- a/arch/powerpc/mm/pgtable-book3e.c
+++ b/arch/powerpc/mm/pgtable-book3e.c
@@ -57,12 +57,8 @@ void vmemmap_remove_mapping(unsigned long start,
 
 static __ref void *early_alloc_pgtable(unsigned long size)
 {
-	void *pt;
-
-	pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS)));
-	memset(pt, 0, size);
-
-	return pt;
+	return memblock_alloc_try_nid(size, size, MEMBLOCK_LOW_LIMIT,
+				      __pa(MAX_DMA_ADDRESS), NUMA_NO_NODE);
 }
 
 /*
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index e7da590c7a78..92a3e4c39540 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -195,11 +195,8 @@ void __init mmu_partition_table_init(void)
 	unsigned long ptcr;
 
 	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
-	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
-						MEMBLOCK_ALLOC_ANYWHERE));
-
 	/* Initialize the Partition Table with no entries */
-	memset((void *)partition_tb, 0, patb_size);
+	partition_tb = memblock_alloc(patb_size, patb_size);
 
 	/*
 	 * update partition table control register,
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
index dced3cd241c2..e377684ac6ad 100644
--- a/arch/powerpc/mm/pgtable-radix.c
+++ b/arch/powerpc/mm/pgtable-radix.c
@@ -51,26 +51,15 @@ static int native_register_process_table(unsigned long base, unsigned long pg_sz
 static __ref void *early_alloc_pgtable(unsigned long size, int nid,
 			unsigned long region_start, unsigned long region_end)
 {
-	unsigned long pa = 0;
-	void *pt;
+	phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
+	phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
 
-	if (region_start || region_end) /* has region hint */
-		pa = memblock_alloc_range(size, size, region_start, region_end,
-						MEMBLOCK_NONE);
-	else if (nid != -1) /* has node hint */
-		pa = memblock_alloc_base_nid(size, size,
-						MEMBLOCK_ALLOC_ANYWHERE,
-						nid, MEMBLOCK_NONE);
+	if (region_start)
+		min_addr = region_start;
+	if (region_end)
+		max_addr = region_end;
 
-	if (!pa)
-		pa = memblock_alloc_base(size, size, MEMBLOCK_ALLOC_ANYWHERE);
-
-	BUG_ON(!pa);
-
-	pt = __va(pa);
-	memset(pt, 0, size);
-
-	return pt;
+	return memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
 }
 
 static int early_map_kernel_page(unsigned long ea, unsigned long pa,
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index f2971522fb4a..f62930f839ca 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -208,7 +208,9 @@ static int __init iob_init(struct device_node *dn)
 	pr_debug(" -> %s\n", __func__);
 
 	/* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
-	iob_l2_base = (u32 *)__va(memblock_alloc_base(1UL<<21, 1UL<<21, 0x80000000));
+	iob_l2_base = memblock_alloc_try_nid_raw(1UL << 21, 1UL << 21,
+					MEMBLOCK_LOW_LIMIT, 0x80000000,
+					NUMA_NO_NODE);
 
 	pr_info("IOBMAP L2 allocated at: %p\n", iob_l2_base);
 
@@ -269,4 +271,3 @@ void __init iommu_init_early_pasemi(void)
 	pasemi_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pasemi;
 	set_pci_dma_ops(&dma_iommu_ops);
 }
-
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 41f62ca27c63..e4f0dfd4ae33 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -130,8 +130,13 @@ static void __init fwnmi_init(void)
 	 * It will be used in real mode mce handler, hence it needs to be
 	 * below RMA.
 	 */
-	mce_data_buf = __va(memblock_alloc_base(RTAS_ERROR_LOG_MAX * nr_cpus,
-					RTAS_ERROR_LOG_MAX, ppc64_rma_size));
+	mce_data_buf = memblock_alloc_try_nid_raw(RTAS_ERROR_LOG_MAX * nr_cpus,
+					RTAS_ERROR_LOG_MAX, MEMBLOCK_LOW_LIMIT,
+					ppc64_rma_size, NUMA_NO_NODE);
+	if (!mce_data_buf)
+		panic("Failed to allocate %d bytes below %pa for MCE buffer\n",
+		      RTAS_ERROR_LOG_MAX * nr_cpus, &ppc64_rma_size);
+
 	for_each_possible_cpu(i) {
 		paca_ptrs[i]->mce_data_buf = mce_data_buf +
 						(RTAS_ERROR_LOG_MAX * i);
@@ -140,8 +145,13 @@ static void __init fwnmi_init(void)
 #ifdef CONFIG_PPC_BOOK3S_64
 	/* Allocate per cpu slb area to save old slb contents during MCE */
 	size = sizeof(struct slb_entry) * mmu_slb_size * nr_cpus;
-	slb_ptr = __va(memblock_alloc_base(size, sizeof(struct slb_entry),
-					ppc64_rma_size));
+	slb_ptr = memblock_alloc_try_nid_raw(size, sizeof(struct slb_entry),
+					MEMBLOCK_LOW_LIMIT, ppc64_rma_size,
+					NUMA_NO_NODE);
+	if (!slb_ptr)
+		panic("Failed to allocate %zu bytes below %pa for slb area\n",
+		      size, &ppc64_rma_size);
+
 	for_each_possible_cpu(i)
 		paca_ptrs[i]->mce_faulty_slbs = slb_ptr + (mmu_slb_size * i);
 #endif
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index a5b40d1460f1..25bc25fe0d93 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -251,8 +251,11 @@ static void allocate_dart(void)
 	 * 16MB (1 << 24) alignment. We allocate a full 16Mb chuck since we
 	 * will blow up an entire large page anyway in the kernel mapping.
 	 */
-	dart_tablebase = __va(memblock_alloc_base(1UL<<24,
-						  1UL<<24, 0x80000000L));
+	dart_tablebase = memblock_alloc_try_nid_raw(SZ_16M, SZ_16M,
+					MEMBLOCK_LOW_LIMIT, SZ_2G,
+					NUMA_NO_NODE);
+	if (!dart_tablebase)
+		panic("Failed to allocate 16MB below 2GB for DART table\n");
 
 	/* There is no point scanning the DART space for leaks*/
 	kmemleak_no_scan((void *)dart_tablebase);