author    Michael Ellerman <michael@ellerman.id.au>    2006-01-25 03:31:28 -0500
committer Paul Mackerras <paulus@samba.org>            2006-02-07 06:38:34 -0500
commit    d7a5b2ffa1352f0310630934a56aecbdfb617b72
tree      5ec2ed1d4e63d4bd2847531115fd263109a16830
parent    2fb07d776b3c4473275eb225b6bf2a83755c9bfe
[PATCH] powerpc: Always panic if lmb_alloc() fails
Currently most callers of lmb_alloc() don't check if it worked or not; if it ever fails, weird bad things will probably happen. The few callers who do check just panic or BUG_ON.

So make lmb_alloc() panic internally, to catch bugs at the source. The few callers who did check the result no longer need to.

The only caller that did anything interesting with the return result was careful_allocation(). For it we create __lmb_alloc_base() which _doesn't_ panic automatically. A little messy, but passable.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
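Condensed from the hunks below, the two calling conventions after this patch look roughly like the following sketch (identifiers are taken from the diff; surrounding code and error handling are simplified for illustration):

    /* Common case: failure is fatal and the panic happens inside
     * the allocator, so the call site needs no error handling. */
    table = lmb_alloc(htab_size_bytes, htab_size_bytes);

    /* careful_allocation() only: __lmb_alloc_base() reports failure
     * to the caller (returns 0) instead of panicking, allowing a
     * retry over all of memory before giving up. */
    ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
    if (!ret)
        ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
    if (!ret)
        panic("numa.c: cannot allocate %lu bytes on node %d", size, nid);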
Diffstat (limited to 'arch/powerpc/mm')
 arch/powerpc/mm/hash_utils_64.c |  1 -
 arch/powerpc/mm/lmb.c           | 14 ++++++++++++++
 arch/powerpc/mm/mem.c           |  1 -
 arch/powerpc/mm/numa.c          |  4 ++--
 arch/powerpc/mm/stab.c          |  4 ----
 5 files changed, 16 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 149351a84b94..95b4cd6b65e0 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -430,7 +430,6 @@ void __init htab_initialize(void)
 	 * the absolute address space.
 	 */
 	table = lmb_alloc(htab_size_bytes, htab_size_bytes);
-	BUG_ON(table == 0);
 
 	DBG("Hash table allocated at %lx, size: %lx\n", table,
 	    htab_size_bytes);
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index bbe3eac918e8..d9c76ce5fa8f 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -226,6 +226,20 @@ unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
 unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
 				    unsigned long max_addr)
 {
+	unsigned long alloc;
+
+	alloc = __lmb_alloc_base(size, align, max_addr);
+
+	if (alloc < 0)
+		panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
+				size, max_addr);
+
+	return alloc;
+}
+
+unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
+				    unsigned long max_addr)
+{
 	long i, j;
 	unsigned long base = 0;
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 550517c2dd42..6809cdba6e94 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -249,7 +249,6 @@ void __init do_init_bootmem(void)
 	bootmap_pages = bootmem_bootmap_pages(total_pages);
 
 	start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
-	BUG_ON(!start);
 
 	boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
 
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 2863a912bcd0..da5280f8cf42 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -570,11 +570,11 @@ static void __init *careful_allocation(int nid, unsigned long size,
 			       unsigned long end_pfn)
 {
 	int new_nid;
-	unsigned long ret = lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+	unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
 	/* retry over all memory */
 	if (!ret)
-		ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());
+		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
 
 	if (!ret)
 		panic("numa.c: cannot allocate %lu bytes on node %d",
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 82e4951826bc..91d25fb27f89 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -247,10 +247,6 @@ void stabs_alloc(void)
 
 		newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
 					 1<<SID_SHIFT);
-		if (! newstab)
-			panic("Unable to allocate segment table for CPU %d.\n",
-			      cpu);
-
 		newstab = (unsigned long)__va(newstab);
 
 		memset((void *)newstab, 0, HW_PAGE_SIZE);