author     David Gibson <david@gibson.dropbear.id.au>    2007-02-27 22:12:29 -0500
committer  Paul Mackerras <paulus@samba.org>             2007-03-07 23:43:28 -0500
commit     eb6de2863750e696201780283e4c9ada19b4728e (patch)
tree       18a4787a4857216313d5836523238e4a9ef14f9e /arch/powerpc
parent     83ddcf5d364be7b1d8c214d2dd97753e1df589cd (diff)
[POWERPC] Allow duplicate lmb_reserve() calls
At present, calling lmb_reserve() (and hence lmb_add_region()) twice for exactly the same memory region will cause strange behaviour.

This makes life difficult when booting from a flat device tree with a memory reserve map. Which regions are automatically reserved by the kernel has changed over time, so it's quite possible a newer kernel could attempt to auto-reserve a region which is also explicitly listed in the device tree's reserve map, leading to trouble.

This patch avoids the problem by making lmb_reserve() ignore a call to reserve a previously reserved region. It also removes a now-redundant test designed to avoid one specific case of the problem noted above.

At present, this patch deals only with duplicate reservations of an identical region. Attempting to reserve two different but overlapping regions will still cause problems. I might post another patch later dealing with this case, but I'm avoiding it now since it is substantially more complicated to deal with, less likely to occur, and more likely to indicate a genuine bug elsewhere if it does occur.

Signed-off-by: David Gibson <dwg@au1.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
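To make the new semantics concrete, here is a minimal standalone model of the patched check (plain userspace C, not the kernel code: the trimmed-down struct, the MAX_LMB_REGIONS bound, and the omission of the adjacency/coalescing logic are all simplifying assumptions):

/* Standalone model of the duplicate check added by this patch. */
#include <stdio.h>

#define MAX_LMB_REGIONS 128

struct lmb_region {
	unsigned long cnt;
	struct { unsigned long base, size; } region[MAX_LMB_REGIONS];
};

static long lmb_add_region(struct lmb_region *rgn,
			   unsigned long base, unsigned long size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		/* The fix: an exact duplicate is silently accepted. */
		if (rgn->region[i].base == base &&
		    rgn->region[i].size == size)
			return 0;
	}
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;
	rgn->region[rgn->cnt].base = base;
	rgn->region[rgn->cnt].size = size;
	rgn->cnt++;
	return 0;
}

int main(void)
{
	struct lmb_region reserved = { .cnt = 0 };

	lmb_add_region(&reserved, 0x800000, 0x400000); /* kernel auto-reserve  */
	lmb_add_region(&reserved, 0x800000, 0x400000); /* reserve-map duplicate */
	printf("regions: %lu\n", reserved.cnt);        /* prints 1, not 2      */
	return 0;
}

Before the patch, the second call would fall through to the adjacency/insertion logic and corrupt the region list; with the check in place it is a harmless no-op.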
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/prom.c  3
-rw-r--r--  arch/powerpc/mm/lmb.c       4
2 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8d52b23348bd..15ece3abfb37 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -954,9 +954,6 @@ static void __init early_reserve_mem(void)
 		size = *(reserve_map++);
 		if (size == 0)
 			break;
-		/* skip if the reservation is for the blob */
-		if (base == self_base && size == self_size)
-			continue;
 		DBG("reserving: %llx -> %llx\n", base, size);
 		lmb_reserve(base, size);
 	}
diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c
index 716a2906a24d..e3a1e8dc536a 100644
--- a/arch/powerpc/mm/lmb.c
+++ b/arch/powerpc/mm/lmb.c
@@ -146,6 +146,10 @@ static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base,
 		unsigned long rgnbase = rgn->region[i].base;
 		unsigned long rgnsize = rgn->region[i].size;
 
+		if ((rgnbase == base) && (rgnsize == size))
+			/* Already have this region, so we're done */
+			return 0;
+
 		adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize);
 		if ( adjacent > 0 ) {
 			rgn->region[i].base -= size;
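For reference, the overlapping-but-not-identical case that the commit message deliberately leaves unhandled could be detected with a helper along these lines (a sketch only; the name lmb_addrs_overlap and its use here are assumptions, not part of this patch or of the tree at this point):

/* Sketch of an overlap test for half-open intervals: two regions
 * [base1, base1+size1) and [base2, base2+size2) overlap exactly when
 * each starts before the other ends. Hypothetical helper, not in
 * this patch. */
static long lmb_addrs_overlap(unsigned long base1, unsigned long size1,
			      unsigned long base2, unsigned long size2)
{
	return (base1 < base2 + size2) && (base2 < base1 + size1);
}

Deciding what to do with a partial overlap (reject, trim, or merge) inside lmb_add_region() is the harder follow-up work the author defers, since an unexpected partial overlap more likely signals a genuine bug in the caller.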