-rw-r--r--  arch/powerpc/include/asm/mmu.h    12
-rw-r--r--  arch/powerpc/kernel/head_40x.S     6
-rw-r--r--  arch/powerpc/kernel/paca.c         2
-rw-r--r--  arch/powerpc/kernel/prom.c        29
-rw-r--r--  arch/powerpc/kernel/rtas.c         2
-rw-r--r--  arch/powerpc/kernel/setup_64.c     2
-rw-r--r--  arch/powerpc/mm/40x_mmu.c         14
-rw-r--r--  arch/powerpc/mm/44x_mmu.c         14
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c    9
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c   22
-rw-r--r--  arch/powerpc/mm/init_32.c         14
-rw-r--r--  arch/powerpc/mm/init_64.c          1
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c      15
-rw-r--r--  arch/powerpc/mm/tlb_nohash.c      14
-rw-r--r--  include/linux/memblock.h           1
-rw-r--r--  mm/memblock.c                      8
16 files changed, 125 insertions, 40 deletions
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 7ebf42ed84a2..bb40a06d3b77 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -2,6 +2,8 @@
 #define _ASM_POWERPC_MMU_H_
 #ifdef __KERNEL__
 
+#include <linux/types.h>
+
 #include <asm/asm-compat.h>
 #include <asm/feature-fixups.h>
 
@@ -82,6 +84,16 @@ extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
 extern void early_init_mmu(void);
 extern void early_init_mmu_secondary(void);
 
+extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+                                       phys_addr_t first_memblock_size);
+
+#ifdef CONFIG_PPC64
+/* This is our real memory area size on ppc64 server, on embedded, we
+ * make it match the size of our bolted TLB area
+ */
+extern u64 ppc64_rma_size;
+#endif /* CONFIG_PPC64 */
+
 #endif /* !__ASSEMBLY__ */
 
 /* The kernel use the constants below to index in the page sizes array.
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index a90625f9b485..8278e8bad5a0 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -923,11 +923,7 @@ initial_mmu:
         mtspr   SPRN_PID,r0
         sync
 
-        /* Configure and load two entries into TLB slots 62 and 63.
-         * In case we are pinning TLBs, these are reserved in by the
-         * other TLB functions.  If not reserving, then it doesn't
-         * matter where they are loaded.
-         */
+        /* Configure and load one entry into TLB slot 63 */
         clrrwi  r4,r4,10                /* Mask off the real page number */
         ori     r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
 
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 139a773853f4..b9ffd7deeed7 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -117,7 +117,7 @@ void __init allocate_pacas(void)
          * the first segment. On iSeries they must be within the area mapped
          * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
          */
-        limit = min(0x10000000ULL, memblock.rmo_size);
+        limit = min(0x10000000ULL, ppc64_rma_size);
         if (firmware_has_feature(FW_FEATURE_ISERIES))
                 limit = min(limit, HvPagesToMap * HVPAGESIZE);
 
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 3aec0b980f6a..c3c6a8857544 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -66,6 +66,7 @@
 int __initdata iommu_is_off;
 int __initdata iommu_force_on;
 unsigned long tce_alloc_start, tce_alloc_end;
+u64 ppc64_rma_size;
 #endif
 
 static int __init early_parse_mem(char *p)
@@ -492,7 +493,7 @@ static int __init early_init_dt_scan_memory_ppc(unsigned long node,
 
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-#if defined(CONFIG_PPC64)
+#ifdef CONFIG_PPC64
         if (iommu_is_off) {
                 if (base >= 0x80000000ul)
                         return;
@@ -501,9 +502,13 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
         }
 #endif
 
-        memblock_add(base, size);
-
+        /* First MEMBLOCK added, do some special initializations */
+        if (memstart_addr == ~(phys_addr_t)0)
+                setup_initial_memory_limit(base, size);
         memstart_addr = min((u64)memstart_addr, base);
+
+        /* Add the chunk to the MEMBLOCK list */
+        memblock_add(base, size);
 }
 
 u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
@@ -655,22 +660,6 @@ static void __init phyp_dump_reserve_mem(void)
 static inline void __init phyp_dump_reserve_mem(void) {}
 #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */
 
-static void set_boot_memory_limit(void)
-{
-#ifdef CONFIG_PPC32
-        /* 601 can only access 16MB at the moment */
-        if (PVR_VER(mfspr(SPRN_PVR)) == 1)
-                memblock_set_current_limit(0x01000000);
-        /* 8xx can only access 8MB at the moment */
-        else if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
-                memblock_set_current_limit(0x00800000);
-        else
-                memblock_set_current_limit(0x10000000);
-#else
-        memblock_set_current_limit(memblock.rmo_size);
-#endif
-}
-
 void __init early_init_devtree(void *params)
 {
         phys_addr_t limit;
@@ -734,8 +723,6 @@ void __init early_init_devtree(void *params)
 
         DBG("Phys. mem: %llx\n", memblock_phys_mem_size());
 
-        set_boot_memory_limit();
-
         /* We may need to relocate the flat tree, do it now.
          * FIXME .. and the initrd too? */
         move_device_tree();
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index d0516dbee762..1662777be5dd 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -934,7 +934,7 @@ void __init rtas_initialize(void)
          */
 #ifdef CONFIG_PPC64
         if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
-                rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX);
+                rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
                 ibm_suspend_me_token = rtas_token("ibm,suspend-me");
         }
 #endif
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index d135f93cb0f6..4360944b60f0 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -487,7 +487,7 @@ static void __init emergency_stack_init(void)
          * bringup, we need to get at them in real mode. This means they
          * must also be within the RMO region.
          */
-        limit = min(slb0_limit(), memblock.rmo_size);
+        limit = min(slb0_limit(), ppc64_rma_size);
 
         for_each_possible_cpu(i) {
                 unsigned long sp;
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 58969b51f454..5810967511d4 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -141,7 +141,19 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
          * coverage with normal-sized pages (or other reasons) do not
          * attempt to allocate outside the allowed range.
          */
-        memblock_set_current_limit(memstart_addr + mapped);
+        memblock_set_current_limit(mapped);
 
         return mapped;
 }
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+                                phys_addr_t first_memblock_size)
+{
+        /* We don't currently support the first MEMBLOCK not mapping 0
+         * physical on those processors
+         */
+        BUG_ON(first_memblock_base != 0);
+
+        /* 40x can only access 16MB at the moment (see head_40x.S) */
+        memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
+}
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index d8c6efb32bc6..024acab588fd 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -24,6 +24,8 @@
  */
 
 #include <linux/init.h>
+#include <linux/memblock.h>
+
 #include <asm/mmu.h>
 #include <asm/system.h>
 #include <asm/page.h>
@@ -213,6 +215,18 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
         return total_lowmem;
 }
 
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+                                phys_addr_t first_memblock_size)
+{
+        /* We don't currently support the first MEMBLOCK not mapping 0
+         * physical on those processors
+         */
+        BUG_ON(first_memblock_base != 0);
+
+        /* 44x has a 256M TLB entry pinned at boot */
+        memblock_set_current_limit(min_t(u64, first_memblock_size, PPC_PIN_SIZE));
+}
+
 #ifdef CONFIG_SMP
 void __cpuinit mmu_init_secondary(int cpu)
 {
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index e525f862d759..0be8fe24c54e 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -215,3 +215,12 @@ void __init adjust_total_lowmem(void)
 
         memblock_set_current_limit(memstart_addr + __max_low_memory);
 }
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+                                phys_addr_t first_memblock_size)
+{
+        phys_addr_t limit = first_memblock_base + first_memblock_size;
+
+        /* 64M mapped initially according to head_fsl_booke.S */
+        memblock_set_current_limit(min_t(u64, limit, 0x04000000));
+}
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index b05890e23813..83f534d862db 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -649,7 +649,7 @@ static void __init htab_initialize(void)
 #ifdef CONFIG_DEBUG_PAGEALLOC
         linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
         linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
-                                                    1, memblock.rmo_size));
+                                                    1, ppc64_rma_size));
         memset(linear_map_hash_slots, 0, linear_map_hash_count);
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
@@ -1248,3 +1248,23 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
         local_irq_restore(flags);
 }
 #endif /* CONFIG_DEBUG_PAGEALLOC */
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+                                phys_addr_t first_memblock_size)
+{
+        /* We don't currently support the first MEMBLOCK not mapping 0
+         * physical on those processors
+         */
+        BUG_ON(first_memblock_base != 0);
+
+        /* On LPAR systems, the first entry is our RMA region,
+         * non-LPAR 64-bit hash MMU systems don't have a limitation
+         * on real mode access, but using the first entry works well
+         * enough. We also clamp it to 1G to avoid some funky things
+         * such as RTAS bugs etc...
+         */
+        ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+
+        /* Finally limit subsequent allocations */
+        memblock_set_current_limit(ppc64_rma_size);
+}
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 59b208b7ec6f..742da43b4ab6 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -237,3 +237,17 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
+
+#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+                                phys_addr_t first_memblock_size)
+{
+        /* We don't currently support the first MEMBLOCK not mapping 0
+         * physical on those processors
+         */
+        BUG_ON(first_memblock_base != 0);
+
+        /* 8xx can only access 8MB at the moment */
+        memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
+}
+#endif /* CONFIG_8xx */
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 71f1415e2472..9e081ffbf0f2 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -328,3 +328,4 @@ int __meminit vmemmap_populate(struct page *start_page,
         return 0;
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 7d34e170e80f..11571e118831 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -271,3 +271,18 @@ void __init MMU_init_hw(void)
 
         if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
 }
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+                                phys_addr_t first_memblock_size)
+{
+        /* We don't currently support the first MEMBLOCK not mapping 0
+         * physical on those processors
+         */
+        BUG_ON(first_memblock_base != 0);
+
+        /* 601 can only access 16MB at the moment */
+        if (PVR_VER(mfspr(SPRN_PVR)) == 1)
+                memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000));
+        else /* Anything else has 256M mapped */
+                memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000));
+}
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 7ba32e762990..a086ed562606 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -446,4 +446,18 @@ void __cpuinit early_init_mmu_secondary(void)
         __early_init_mmu(0);
 }
 
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+                                phys_addr_t first_memblock_size)
+{
+        /* On Embedded 64-bit, we adjust the RMA size to match
+         * the bolted TLB entry. We know for now that only 1G
+         * entries are supported though that may eventually
+         * change. We crop it to the size of the first MEMBLOCK to
+         * avoid going over total available memory just in case...
+         */
+        ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+
+        /* Finally limit subsequent allocations */
+        memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
+}
 #endif /* CONFIG_PPC64 */
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index c4f6e53264ed..71b8edc6ede8 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -33,7 +33,6 @@ struct memblock_type {
 
 struct memblock {
         unsigned long debug;
-        u64 rmo_size;
         u64 current_limit;
         struct memblock_type memory;
         struct memblock_type reserved;
diff --git a/mm/memblock.c b/mm/memblock.c
index 770c5bfac2cd..73d903ebf3d4 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -49,7 +49,6 @@ void memblock_dump_all(void)
                 return;
 
         pr_info("MEMBLOCK configuration:\n");
-        pr_info(" rmo_size    = 0x%llx\n", (unsigned long long)memblock.rmo_size);
         pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size);
 
         memblock_dump(&memblock.memory, "memory");
@@ -195,10 +194,6 @@ static long memblock_add_region(struct memblock_type *type, u64 base, u64 size)
 
 long memblock_add(u64 base, u64 size)
 {
-        /* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */
-        if (base == 0)
-                memblock.rmo_size = size;
-
         return memblock_add_region(&memblock.memory, base, size);
 
 }
@@ -459,9 +454,6 @@ void __init memblock_enforce_memory_limit(u64 memory_limit)
                         break;
         }
 
-        if (memblock.memory.regions[0].size < memblock.rmo_size)
-                memblock.rmo_size = memblock.memory.regions[0].size;
-
         memory_limit = memblock_end_of_DRAM();
 
         /* And truncate any reserves above the limit also. */
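
The pattern this commit installs in every MMU family is the same: early_init_dt_add_memory_arch() (see the prom.c hunk) calls setup_initial_memory_limit() exactly once, on the first memory range parsed from the device tree, and the platform hook clamps memblock's allocation limit to whatever its boot-time pinned mapping can actually reach. Below is a minimal sketch of what a hypothetical new port would add; the hook name and signature come from the mmu.h hunk above, but the platform and its 128MB bolted-entry size are invented for illustration.

        /* Sketch only: a hypothetical platform hook modelled on the
         * implementations added in this commit. The 0x08000000 (128MB)
         * figure is an invented example, not a value from any real board.
         */
        #include <linux/kernel.h>
        #include <linux/bug.h>
        #include <linux/memblock.h>

        #include <asm/mmu.h>

        void setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                        phys_addr_t first_memblock_size)
        {
                /* Like the 32-bit implementations above, assume the boot
                 * mapping starts at physical address 0.
                 */
                BUG_ON(first_memblock_base != 0);

                /* Never allocate past the pinned boot mapping, nor past
                 * the first block of RAM the device tree reported.
                 */
                memblock_set_current_limit(min_t(u64, first_memblock_size,
                                                 0x08000000));
        }

Because the hook runs during the very first memory scan, later early allocations (such as relocating the flat device tree) land in memory the CPU can already address, which is what the removed set_boot_memory_limit() approximated with hardcoded per-CPU guesses.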