author      Benjamin Herrenschmidt <benh@kernel.crashing.org>   2010-07-06 18:39:02 -0400
committer   Benjamin Herrenschmidt <benh@kernel.crashing.org>   2010-08-04 22:56:08 -0400
commit      cd3db0c4ca3d237e7ad20f7107216e575705d2b0 (patch)
tree        03be7c14bd68a568a6e2f6df2db9fbbdf11c1483 /arch/powerpc/mm
parent      e63075a3c9377536d085bc013cd3fe6323162449 (diff)
memblock: Remove rmo_size, bury it in arch/powerpc where it belongs
The RMA (RMO is a misnomer) is a concept specific to ppc64 (in fact,
server ppc64, though I hijack it on embedded ppc64 for similar purposes).
It represents the area of memory that can be accessed in real mode
(i.e. with the MMU off), or, on embedded, from the exception vectors
(which are bolted in the TLB), which pretty much boils down to the
same thing.
We take that out of the generic MEMBLOCK data structure and move it into
arch/powerpc where it belongs, renaming it to "RMA" while we're at it.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
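
For orientation, the arch-side interface this commit introduces boils down
to one global and one per-MMU-family hook. A minimal sketch of the
declarations follows; the header they land in is an assumption (something
like asm/mmu.h), since the view below is limited to arch/powerpc/mm and
does not show headers:

/* Sketch of the interface added by this commit; the exact header is an
 * assumption, as only the arch/powerpc/mm side is visible in this view.
 */

/* ppc64 only: size of the Real Mode Area, replacing memblock.rmo_size */
extern phys_addr_t ppc64_rma_size;

/* Called early in boot, once the first memory block is known. Each MMU
 * family implements it to clamp early memblock allocations to whatever
 * its boot-time mapping can actually reach.
 */
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                phys_addr_t first_memblock_size);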
Diffstat (limited to 'arch/powerpc/mm')

 arch/powerpc/mm/40x_mmu.c       | 14 +++++++++++++-
 arch/powerpc/mm/44x_mmu.c       | 14 ++++++++++++++
 arch/powerpc/mm/fsl_booke_mmu.c |  9 +++++++++
 arch/powerpc/mm/hash_utils_64.c | 22 +++++++++++++++++++++-
 arch/powerpc/mm/init_32.c       | 14 ++++++++++++++
 arch/powerpc/mm/init_64.c       |  1 +
 arch/powerpc/mm/ppc_mmu_32.c    | 15 +++++++++++++++
 arch/powerpc/mm/tlb_nohash.c    | 14 ++++++++++++++
 8 files changed, 101 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 58969b51f454..5810967511d4 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -141,7 +141,19 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	 * coverage with normal-sized pages (or other reasons) do not
 	 * attempt to allocate outside the allowed range.
 	 */
-	memblock_set_current_limit(memstart_addr + mapped);
+	memblock_set_current_limit(mapped);
 
 	return mapped;
 }
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 40x can only access 16MB at the moment (see head_40x.S) */
+	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
+}
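
A subtlety shared by several of the hooks in this commit:
memblock_set_current_limit() takes an absolute physical address, yet this
variant passes min_t(u64, first_memblock_size, 0x00800000), which is a
size. The two coincide only because the BUG_ON pins the first block's base
at physical 0. A standalone sketch of that equivalence (illustrative
userspace C with made-up values, not kernel code):

#include <assert.h>
#include <stdint.h>

/* The address limit actually wanted: the end of the reachable window */
static uint64_t limit_addr(uint64_t base, uint64_t size, uint64_t window)
{
        uint64_t capped = size < window ? size : window;
        return base + capped;
}

int main(void)
{
        /* With base == 0 (guaranteed by the BUG_ON), passing the capped
         * size alone equals passing base + capped size.
         */
        assert(limit_addr(0, 0x02000000, 0x00800000) == 0x00800000);
        return 0;
}

The fsl_booke variant further down sidesteps the assumption instead,
passing first_memblock_base + first_memblock_size as the limit.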
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index d8c6efb32bc6..024acab588fd 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -24,6 +24,8 @@
  */
 
 #include <linux/init.h>
+#include <linux/memblock.h>
+
 #include <asm/mmu.h>
 #include <asm/system.h>
 #include <asm/page.h>
@@ -213,6 +215,18 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	return total_lowmem;
 }
 
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 44x has a 256M TLB entry pinned at boot */
+	memblock_set_current_limit(min_t(u64, first_memblock_size, PPC_PIN_SIZE));
+}
+
 #ifdef CONFIG_SMP
 void __cpuinit mmu_init_secondary(int cpu)
 {
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index e525f862d759..0be8fe24c54e 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -215,3 +215,12 @@ void __init adjust_total_lowmem(void)
 
 	memblock_set_current_limit(memstart_addr + __max_low_memory);
 }
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	phys_addr_t limit = first_memblock_base + first_memblock_size;
+
+	/* 64M mapped initially according to head_fsl_booke.S */
+	memblock_set_current_limit(min_t(u64, limit, 0x04000000));
+}
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index b05890e23813..83f534d862db 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -649,7 +649,7 @@ static void __init htab_initialize(void)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
-						    1, memblock.rmo_size));
+						    1, ppc64_rma_size));
 	memset(linear_map_hash_slots, 0, linear_map_hash_count);
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
@@ -1248,3 +1248,23 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	local_irq_restore(flags);
 }
 #endif /* CONFIG_DEBUG_PAGEALLOC */
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* On LPAR systems, the first entry is our RMA region,
+	 * non-LPAR 64-bit hash MMU systems don't have a limitation
+	 * on real mode access, but using the first entry works well
+	 * enough. We also clamp it to 1G to avoid some funky things
+	 * such as RTAS bugs etc...
+	 */
+	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+
+	/* Finally limit subsequent allocations */
+	memblock_set_current_limit(ppc64_rma_size);
+}
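
To make the 1G clamp concrete: min_t(u64, first_memblock_size, 0x40000000)
simply takes the smaller of the first block's size and 1GB. A small
runnable illustration (plain userspace C with hypothetical block sizes,
not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's min_t(u64, a, b) */
static uint64_t min_u64(uint64_t a, uint64_t b)
{
        return a < b ? a : b;
}

int main(void)
{
        uint64_t rma_cap = 0x40000000ULL;  /* 1GB ceiling */
        uint64_t small   = 0x10000000ULL;  /* 256MB first block */
        uint64_t large   = 0x80000000ULL;  /* 2GB first block */

        /* A 256MB first block yields a 256MB RMA... */
        printf("0x%llx\n", (unsigned long long)min_u64(small, rma_cap));
        /* ...while a 2GB first block is clamped to a 1GB RMA. */
        printf("0x%llx\n", (unsigned long long)min_u64(large, rma_cap));
        return 0;
}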
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 59b208b7ec6f..742da43b4ab6 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -237,3 +237,17 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
+
+#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 8xx can only access 8MB at the moment */
+	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
+}
+#endif /* CONFIG_8xx */
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 71f1415e2472..9e081ffbf0f2 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -328,3 +328,4 @@ int __meminit vmemmap_populate(struct page *start_page,
 	return 0;
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 7d34e170e80f..11571e118831 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -271,3 +271,18 @@ void __init MMU_init_hw(void)
 
 	if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
 }
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 601 can only access 16MB at the moment */
+	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
+		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000));
+	else /* Anything else has 256M mapped */
+		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000));
+}
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 7ba32e762990..a086ed562606 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -446,4 +446,18 @@ void __cpuinit early_init_mmu_secondary(void)
 	__early_init_mmu(0);
 }
 
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* On Embedded 64-bit, we adjust the RMA size to match
+	 * the bolted TLB entry. We know for now that only 1G
+	 * entries are supported though that may eventually
+	 * change. We crop it to the size of the first MEMBLOCK to
+	 * avoid going over total available memory just in case...
+	 */
+	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+
+	/* Finally limit subsequent allocations */
+	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
+}
 #endif /* CONFIG_PPC64 */
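
The caller of setup_initial_memory_limit() lives outside arch/powerpc/mm
and is therefore filtered out of this view; presumably the early
device-tree/memory setup code invokes it once the first memory range is
known. A hedged sketch of that call site (the function name and parameters
here are assumptions, not shown in this commit view):

/* Sketch only: the arch/powerpc/kernel side of this commit is not
 * shown in this diffstat-limited view, so this call site is
 * hypothetical.
 */
static void __init example_early_mem_setup(phys_addr_t first_base,
					   phys_addr_t first_size)
{
	/* Must run before any further memblock allocations, so the
	 * per-MMU-family hook can clamp them to the boot-mapped window.
	 */
	setup_initial_memory_limit(first_base, first_size);
}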