author		Michael Ellerman <mpe@ellerman.id.au>	2015-03-29 23:10:37 -0400
committer	Michael Ellerman <mpe@ellerman.id.au>	2015-04-10 06:02:48 -0400
commit		f691fa1080c37c48da0cdfeae082c3bef5df2643 (patch)
tree		fc961091a54c1be106c320108b231b340a979888 /arch/powerpc/mm
parent		4f9c53c8cc76ed84e3bb0cca8c4ffa2b170d0239 (diff)
powerpc: Replace mem_init_done with slab_is_available()
We have a powerpc-specific global called mem_init_done which is "set on
boot once kmalloc can be called". But that's not *quite* true. We set it
at the bottom of mem_init(), and rely on the fact that mm_init() calls
kmem_cache_init() immediately after that, and nothing is running in
parallel.

So replace it with the generic and 100% correct slab_is_available().

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
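For background, this is the early-vs-late allocation idiom the predicate gates: before kmem_cache_init() has run, only the memblock allocator is usable; afterwards the normal page allocator is. A minimal sketch of the pattern (the helper name is hypothetical and not part of this patch; slab_is_available(), __get_free_page() and memblock_alloc() are the real interfaces as used in the hunks below, where memblock_alloc() returns a physical address):

        /*
         * Hypothetical helper illustrating the idiom; not from this patch.
         * slab_is_available() becomes true once mm_init() has called
         * kmem_cache_init(), which is exactly the point mem_init_done
         * tried to approximate.
         */
        static void *alloc_page_early_or_late(void)
        {
                if (slab_is_available())
                        /* Normal path: the page allocator is up. */
                        return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

                /* Early boot: memblock hands back a physical address. */
                return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
        }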
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/mem.c		3
-rw-r--r--	arch/powerpc/mm/pgtable_32.c	9
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	4
3 files changed, 6 insertions, 10 deletions
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b7285a5870f8..45fda71feb27 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -61,7 +61,6 @@
 #define CPU_FTR_NOEXECUTE       0
 #endif
 
-int mem_init_done;
 unsigned long long memory_limit;
 
 #ifdef CONFIG_HIGHMEM
@@ -377,8 +376,6 @@ void __init mem_init(void)
         pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
                 VMALLOC_START, VMALLOC_END);
 #endif /* CONFIG_PPC32 */
-
-        mem_init_done = 1;
 }
 
 void free_initmem(void)
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 70b4752af54f..7692d1bb1bc6 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -107,9 +107,8 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
         pte_t *pte;
-        extern int mem_init_done;
 
-        if (mem_init_done) {
+        if (slab_is_available()) {
                 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
         } else {
                 pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
@@ -216,7 +215,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
          * Don't allow anybody to remap normal RAM that we're using.
          * mem_init() sets high_memory so only do the check after that.
          */
-        if (mem_init_done && (p < virt_to_phys(high_memory)) &&
+        if (slab_is_available() && (p < virt_to_phys(high_memory)) &&
             !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
                 printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
                        (unsigned long long)p, __builtin_return_address(0));
@@ -244,7 +243,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
         if ((v = p_mapped_by_tlbcam(p)))
                 goto out;
 
-        if (mem_init_done) {
+        if (slab_is_available()) {
                 struct vm_struct *area;
                 area = get_vm_area_caller(size, VM_IOREMAP, caller);
                 if (area == 0)
@@ -263,7 +262,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
         for (i = 0; i < size && err == 0; i += PAGE_SIZE)
                 err = map_page(v+i, p+i, flags);
         if (err) {
-                if (mem_init_done)
+                if (slab_is_available())
                         vunmap((void *)v);
                 return NULL;
         }
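The same check selects where __ioremap_caller() gets its virtual range. A condensed sketch of that control flow (the pre-slab branch, which carves from the fixed window tracked in ioremap_bot on ppc32, comes from the surrounding pgtable_32.c code rather than this diff):

        if (slab_is_available()) {
                /* Late boot: reserve a range in the vmalloc/ioremap region. */
                struct vm_struct *area;
                area = get_vm_area_caller(size, VM_IOREMAP, caller);
                if (area == NULL)
                        return NULL;
                v = (unsigned long)area->addr;
        } else {
                /* Early boot: take the range from the fixed ioremap window. */
                v = (ioremap_bot -= size);
        }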
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 3ac3a0a1edfb..59daa5eeec25 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -231,7 +231,7 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
         if ((size == 0) || (paligned == 0))
                 return NULL;
 
-        if (mem_init_done) {
+        if (slab_is_available()) {
                 struct vm_struct *area;
 
                 area = __get_vm_area_caller(size, VM_IOREMAP,
@@ -315,7 +315,7 @@ void __iounmap(volatile void __iomem *token)
 {
         void *addr;
 
-        if (!mem_init_done)
+        if (!slab_is_available())
         return;
 
         addr = (void *) ((unsigned long __force)
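A note on the __iounmap() hunk: mappings created before the slab is up are bolted early mappings with no vm_struct behind them, so there is nothing for vunmap() to tear down; the function simply returns, leaving early mappings permanently in place.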