author    Michael Ellerman <michael@ellerman.id.au>    2006-01-25 03:31:28 -0500
committer Paul Mackerras <paulus@samba.org>            2006-02-07 06:38:34 -0500
commit    d7a5b2ffa1352f0310630934a56aecbdfb617b72 (patch)
tree      5ec2ed1d4e63d4bd2847531115fd263109a16830 /arch
parent    2fb07d776b3c4473275eb225b6bf2a83755c9bfe (diff)
[PATCH] powerpc: Always panic if lmb_alloc() fails
Currently most callers of lmb_alloc() don't check whether it succeeded; if it
ever fails, weird bad things will probably happen. The few callers who do
check just panic or BUG_ON(). So make lmb_alloc() panic internally, to catch
bugs at the source. The few callers who did check the result no longer need
to.

The only caller that did anything interesting with the return value was
careful_allocation(). For it we create __lmb_alloc_base(), which _doesn't_
panic automatically -- a little messy, but passable.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
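The resulting API split, paraphrased as declarations (a sketch, not a quote
from the tree: lmb_alloc()'s own body is untouched by this patch, and the
behaviour noted for each function is taken from the commit message and the
hunks below):

/* Sketch of the allocator entry points after this patch (paraphrased;
 * only lmb_alloc_base() and __lmb_alloc_base() appear in the diff). */

/* Panics internally on failure, so callers need not check the result. */
unsigned long __init lmb_alloc(unsigned long size, unsigned long align);

/* Panics internally on failure; now a thin wrapper around the worker. */
unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
				    unsigned long max_addr);

/* Does NOT panic: returns 0 on failure (careful_allocation() tests !ret),
 * so that one caller can retry with a wider limit before giving up. */
unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
				      unsigned long max_addr);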
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/kernel/prom.c        |  4 ----
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c   |  1 -
-rw-r--r--  arch/powerpc/mm/lmb.c             | 14 ++++++++++++++
-rw-r--r--  arch/powerpc/mm/mem.c             |  1 -
-rw-r--r--  arch/powerpc/mm/numa.c            |  4 ++--
-rw-r--r--  arch/powerpc/mm/stab.c            |  4 ----
-rw-r--r--  arch/powerpc/sysdev/dart_iommu.c  |  2 --
7 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 294832a7e0a6..82d117c60d7f 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -831,10 +831,6 @@ void __init unflatten_device_tree(void)
 
 	/* Allocate memory for the expanded device tree */
 	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
-	if (!mem) {
-		DBG("Couldn't allocate memory with lmb_alloc()!\n");
-		panic("Couldn't allocate memory with lmb_alloc()!\n");
-	}
 	mem = (unsigned long) __va(mem);
 
 	((u32 *)mem)[size / 4] = 0xdeadbeef;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 149351a84b94..95b4cd6b65e0 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -430,7 +430,6 @@ void __init htab_initialize(void)
 	 * the absolute address space.
 	 */
 	table = lmb_alloc(htab_size_bytes, htab_size_bytes);
-	BUG_ON(table == 0);
 
 	DBG("Hash table allocated at %lx, size: %lx\n", table,
 	    htab_size_bytes);
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index bbe3eac918e8..d9c76ce5fa8f 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -226,6 +226,20 @@ unsigned long __init lmb_alloc(unsigned long size, unsigned long align)
 unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align,
 				    unsigned long max_addr)
 {
+	unsigned long alloc;
+
+	alloc = __lmb_alloc_base(size, align, max_addr);
+
+	if (alloc < 0)
+		panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
+		      size, max_addr);
+
+	return alloc;
+}
+
+unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align,
+				      unsigned long max_addr)
+{
 	long i, j;
 	unsigned long base = 0;
 
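One nit in the hunk above: `alloc` is declared `unsigned long`, so the
`if (alloc < 0)` comparison can never be true and the panic is unreachable as
written. Judging by careful_allocation() below, which tests `if (!ret)` after
calling __lmb_alloc_base(), failure is signalled by a return value of 0, so
the check presumably ought to be against zero. A sketch of that correction:

	alloc = __lmb_alloc_base(size, align, max_addr);

	/* __lmb_alloc_base() signals failure with 0 (careful_allocation()
	 * tests !ret); an unsigned value can never be < 0. */
	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		      size, max_addr);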
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 550517c2dd42..6809cdba6e94 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -249,7 +249,6 @@ void __init do_init_bootmem(void)
249 bootmap_pages = bootmem_bootmap_pages(total_pages); 249 bootmap_pages = bootmem_bootmap_pages(total_pages);
250 250
251 start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE); 251 start = lmb_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
252 BUG_ON(!start);
253 252
254 boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages); 253 boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages);
255 254
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 2863a912bcd0..da5280f8cf42 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -570,11 +570,11 @@ static void __init *careful_allocation(int nid, unsigned long size,
 					    unsigned long end_pfn)
 {
 	int new_nid;
-	unsigned long ret = lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+	unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
 	/* retry over all memory */
 	if (!ret)
-		ret = lmb_alloc_base(size, align, lmb_end_of_DRAM());
+		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
 
 	if (!ret)
 		panic("numa.c: cannot allocate %lu bytes on node %d",
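This fallback is the reason __lmb_alloc_base() exists: try below the node's
end address first, retry over all of DRAM, and leave the panic decision to
the caller. Reduced to its shape as a standalone sketch (the wrapper name is
hypothetical; __lmb_alloc_base(), lmb_end_of_DRAM() and PAGE_SHIFT are from
the patch, and the rest of careful_allocation(), which uses new_nid, is
omitted):

/* Hypothetical standalone wrapper illustrating the two-step fallback. */
static unsigned long try_node_then_anywhere(unsigned long size,
					    unsigned long align,
					    unsigned long end_pfn)
{
	/* First attempt: stay below the node's last page frame. */
	unsigned long ret = __lmb_alloc_base(size, align,
					     end_pfn << PAGE_SHIFT);

	/* Second attempt: anywhere in DRAM; 0 still means failure. */
	if (!ret)
		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

	/* The caller decides whether failure is fatal, as
	 * careful_allocation() does by panicking when both attempts fail. */
	return ret;
}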
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 82e4951826bc..91d25fb27f89 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -247,10 +247,6 @@ void stabs_alloc(void)
 
 		newstab = lmb_alloc_base(HW_PAGE_SIZE, HW_PAGE_SIZE,
 					 1<<SID_SHIFT);
-		if (! newstab)
-			panic("Unable to allocate segment table for CPU %d.\n",
-			      cpu);
-
 		newstab = (unsigned long)__va(newstab);
 
 		memset((void *)newstab, 0, HW_PAGE_SIZE);
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 977de9db8754..929ad2333aab 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -194,8 +194,6 @@ static int dart_init(struct device_node *dart_node)
 	 * prefetching into invalid pages and corrupting data
 	 */
 	tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE);
-	if (!tmp)
-		panic("DART: Cannot allocate spare page!");
 	dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) &
 					 DARTMAP_RPNMASK);
 