Diffstat (limited to 'arch/powerpc')
 arch/powerpc/include/asm/memblock.h      |  7
 arch/powerpc/include/asm/mmu.h           | 12
 arch/powerpc/kernel/head_40x.S           |  6
 arch/powerpc/kernel/paca.c               |  2
 arch/powerpc/kernel/prom.c               | 15
 arch/powerpc/kernel/rtas.c               |  2
 arch/powerpc/kernel/setup_32.c           |  2
 arch/powerpc/kernel/setup_64.c           |  2
 arch/powerpc/mm/40x_mmu.c                | 17
 arch/powerpc/mm/44x_mmu.c                | 14
 arch/powerpc/mm/fsl_booke_mmu.c          | 12
 arch/powerpc/mm/hash_utils_64.c          | 35
 arch/powerpc/mm/init_32.c                | 43
 arch/powerpc/mm/init_64.c                |  1
 arch/powerpc/mm/mem.c                    | 94
 arch/powerpc/mm/numa.c                   | 17
 arch/powerpc/mm/ppc_mmu_32.c             | 18
 arch/powerpc/mm/tlb_nohash.c             | 16
 arch/powerpc/platforms/embedded6xx/wii.c |  2
 19 files changed, 193 insertions(+), 124 deletions(-)
diff --git a/arch/powerpc/include/asm/memblock.h b/arch/powerpc/include/asm/memblock.h
index 3c29728b56b1..43efc345065e 100644
--- a/arch/powerpc/include/asm/memblock.h
+++ b/arch/powerpc/include/asm/memblock.h
@@ -5,11 +5,4 @@
 
 #define MEMBLOCK_DBG(fmt...) udbg_printf(fmt)
 
-#ifdef CONFIG_PPC32
-extern phys_addr_t lowmem_end_addr;
-#define MEMBLOCK_REAL_LIMIT	lowmem_end_addr
-#else
-#define MEMBLOCK_REAL_LIMIT	0
-#endif
-
 #endif /* _ASM_POWERPC_MEMBLOCK_H */
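This hunk drops the compile-time MEMBLOCK_REAL_LIMIT macro in favour of a runtime limit, set via memblock_set_current_limit() throughout the hunks below. A minimal userspace sketch of the idea follows; it is not part of the patch, and the mock_* names are hypothetical:

    /* Hypothetical mock, not kernel code: one mutable runtime limit gates
     * early allocations, replacing the per-config MEMBLOCK_REAL_LIMIT. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;

    static phys_addr_t current_limit = ~(phys_addr_t)0;  /* "anywhere" */

    static void mock_set_current_limit(phys_addr_t limit)
    {
        current_limit = limit;
    }

    /* Refuse allocations that would end above the current limit */
    static phys_addr_t mock_alloc(phys_addr_t base, phys_addr_t size)
    {
        return (base + size <= current_limit) ? base : 0;
    }

    int main(void)
    {
        mock_set_current_limit(0x10000000);  /* e.g. 256MB mapped early */
        printf("fits:     0x%llx\n",
               (unsigned long long)mock_alloc(0x0ff00000, 0x100000));
        printf("rejected: 0x%llx\n",
               (unsigned long long)mock_alloc(0x0ff00000, 0x200000));
        return 0;
    }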
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 7ebf42ed84a2..bb40a06d3b77 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -2,6 +2,8 @@
 #define _ASM_POWERPC_MMU_H_
 #ifdef __KERNEL__
 
+#include <linux/types.h>
+
 #include <asm/asm-compat.h>
 #include <asm/feature-fixups.h>
 
@@ -82,6 +84,16 @@ extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
 extern void early_init_mmu(void);
 extern void early_init_mmu_secondary(void);
 
+extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				       phys_addr_t first_memblock_size);
+
+#ifdef CONFIG_PPC64
+/* This is our real memory area size on ppc64 server, on embedded, we
+ * make it match the size of our bolted TLB area
+ */
+extern u64 ppc64_rma_size;
+#endif /* CONFIG_PPC64 */
+
 #endif /* !__ASSEMBLY__ */
 
 /* The kernel use the constants below to index in the page sizes array.
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index a90625f9b485..8278e8bad5a0 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -923,11 +923,7 @@ initial_mmu:
 	mtspr	SPRN_PID,r0
 	sync
 
-	/* Configure and load two entries into TLB slots 62 and 63.
-	 * In case we are pinning TLBs, these are reserved in by the
-	 * other TLB functions.  If not reserving, then it doesn't
-	 * matter where they are loaded.
-	 */
+	/* Configure and load one entry into TLB slot 63 */
 	clrrwi	r4,r4,10		/* Mask off the real page number */
 	ori	r4,r4,(TLB_WR | TLB_EX)	/* Set the write and execute bits */
 
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index d0a26f1770fe..a4e72159234f 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -127,7 +127,7 @@ void __init allocate_pacas(void)
 	 * the first segment. On iSeries they must be within the area mapped
 	 * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
 	 */
-	limit = min(0x10000000ULL, memblock.rmo_size);
+	limit = min(0x10000000ULL, ppc64_rma_size);
 	if (firmware_has_feature(FW_FEATURE_ISERIES))
 		limit = min(limit, HvPagesToMap * HVPAGESIZE);
 
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index fed9bf6187d1..c3c6a8857544 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -66,6 +66,7 @@
 int __initdata iommu_is_off;
 int __initdata iommu_force_on;
 unsigned long tce_alloc_start, tce_alloc_end;
+u64 ppc64_rma_size;
 #endif
 
 static int __init early_parse_mem(char *p)
@@ -98,7 +99,7 @@ static void __init move_device_tree(void)
 
 	if ((memory_limit && (start + size) > memory_limit) ||
 			overlaps_crashkernel(start, size)) {
-		p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size));
+		p = __va(memblock_alloc(size, PAGE_SIZE));
 		memcpy(p, initial_boot_params, size);
 		initial_boot_params = (struct boot_param_header *)p;
 		DBG("Moved device tree to 0x%p\n", p);
@@ -492,7 +493,7 @@ static int __init early_init_dt_scan_memory_ppc(unsigned long node,
 
 void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 {
-#if defined(CONFIG_PPC64)
+#ifdef CONFIG_PPC64
 	if (iommu_is_off) {
 		if (base >= 0x80000000ul)
 			return;
@@ -501,9 +502,13 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 	}
 #endif
 
-	memblock_add(base, size);
-
+	/* First MEMBLOCK added, do some special initializations */
+	if (memstart_addr == ~(phys_addr_t)0)
+		setup_initial_memory_limit(base, size);
 	memstart_addr = min((u64)memstart_addr, base);
+
+	/* Add the chunk to the MEMBLOCK list */
+	memblock_add(base, size);
 }
 
 u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
@@ -655,7 +660,6 @@ static void __init phyp_dump_reserve_mem(void)
 static inline void __init phyp_dump_reserve_mem(void) {}
 #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */
 
-
 void __init early_init_devtree(void *params)
 {
 	phys_addr_t limit;
@@ -683,6 +687,7 @@ void __init early_init_devtree(void *params)
 
 	/* Scan memory nodes and rebuild MEMBLOCKs */
 	memblock_init();
+
 	of_scan_flat_dt(early_init_dt_scan_root, NULL);
 	of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
 
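The early_init_dt_add_memory_arch() change relies on memstart_addr starting out as all-ones: the first region added still sees that sentinel, so setup_initial_memory_limit() runs exactly once, before memstart_addr is lowered. A standalone sketch of the sentinel pattern (a hypothetical mock harness, not the kernel code):

    /* Mock of the one-time first-region detection used above. */
    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t phys_addr_t;

    static phys_addr_t memstart_addr = ~(phys_addr_t)0;
    static int limit_setup_calls;

    static void setup_initial_memory_limit(phys_addr_t base, phys_addr_t size)
    {
        (void)base;
        (void)size;
        limit_setup_calls++;
    }

    static void add_memory(phys_addr_t base, phys_addr_t size)
    {
        /* First region added: do the one-time initialization */
        if (memstart_addr == ~(phys_addr_t)0)
            setup_initial_memory_limit(base, size);
        memstart_addr = memstart_addr < base ? memstart_addr : base;
    }

    int main(void)
    {
        add_memory(0x00000000, 0x40000000);
        add_memory(0x80000000, 0x40000000);
        assert(limit_setup_calls == 1);  /* only the first add triggers it */
        return 0;
    }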
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 41048de3c6c3..7333fdbf857b 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -969,7 +969,7 @@ void __init rtas_initialize(void)
 	 */
 #ifdef CONFIG_PPC64
 	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) {
-		rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX);
+		rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
 		ibm_suspend_me_token = rtas_token("ibm,suspend-me");
 	}
 #endif
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 93666f9cabf1..b86111fe9257 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -246,7 +246,7 @@ static void __init irqstack_early_init(void)
 	unsigned int i;
 
 	/* interrupt stacks must be in lowmem, we get that for free on ppc32
-	 * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
+	 * as the memblock is limited to lowmem by default */
 	for_each_possible_cpu(i) {
 		softirq_ctx[i] = (struct thread_info *)
 			__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index e72690ec9b87..2a178b0ebcdf 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -486,7 +486,7 @@ static void __init emergency_stack_init(void)
 	 * bringup, we need to get at them in real mode. This means they
 	 * must also be within the RMO region.
 	 */
-	limit = min(slb0_limit(), memblock.rmo_size);
+	limit = min(slb0_limit(), ppc64_rma_size);
 
 	for_each_possible_cpu(i) {
 		unsigned long sp;
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 1dc2fa5ce1bd..5810967511d4 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -47,6 +48,7 @@
 #include <asm/bootx.h>
 #include <asm/machdep.h>
 #include <asm/setup.h>
+
 #include "mmu_decl.h"
 
 extern int __map_without_ltlbs;
@@ -139,8 +141,19 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	 * coverage with normal-sized pages (or other reasons) do not
 	 * attempt to allocate outside the allowed range.
	 */
-
-	__initial_memory_limit_addr = memstart_addr + mapped;
+	memblock_set_current_limit(mapped);
 
 	return mapped;
 }
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 40x can only access 16MB at the moment (see head_40x.S) */
+	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
+}
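Each per-platform setup_initial_memory_limit() in this patch follows the same shape: clamp the early-allocation limit to the smaller of the first region's size and the window the boot-time TLB setup actually maps. A standalone sketch of that clamp, with hypothetical example values:

    /* Illustration only: the early limit is min(first region size,
     * boot-mapped window), as in the variants added by this patch. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t clamp_limit(uint64_t first_memblock_size,
                                uint64_t mapped_window)
    {
        return first_memblock_size < mapped_window ? first_memblock_size
                                                   : mapped_window;
    }

    int main(void)
    {
        /* An 8MB window against a 128MB first region -> 8MB limit */
        printf("0x%llx\n",
               (unsigned long long)clamp_limit(0x08000000, 0x00800000));
        /* A 4MB board: the region itself is the binding constraint */
        printf("0x%llx\n",
               (unsigned long long)clamp_limit(0x00400000, 0x00800000));
        return 0;
    }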
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index d8c6efb32bc6..024acab588fd 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -24,6 +24,8 @@
  */
 
 #include <linux/init.h>
+#include <linux/memblock.h>
+
 #include <asm/mmu.h>
 #include <asm/system.h>
 #include <asm/page.h>
@@ -213,6 +215,18 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	return total_lowmem;
 }
 
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 44x has a 256M TLB entry pinned at boot */
+	memblock_set_current_limit(min_t(u64, first_memblock_size, PPC_PIN_SIZE));
+}
+
 #ifdef CONFIG_SMP
 void __cpuinit mmu_init_secondary(int cpu)
 {
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 4b66a1ece6d8..cde270847e7c 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -40,6 +40,7 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -213,5 +214,14 @@ void __init adjust_total_lowmem(void)
 	pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
 	        (unsigned int)((total_lowmem - __max_low_memory) >> 20));
 
-	__initial_memory_limit_addr = memstart_addr + __max_low_memory;
+	memblock_set_current_limit(memstart_addr + __max_low_memory);
+}
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	phys_addr_t limit = first_memblock_base + first_memblock_size;
+
+	/* 64M mapped initially according to head_fsl_booke.S */
+	memblock_set_current_limit(min_t(u64, limit, 0x04000000));
 }
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 09dffe6efa46..83f534d862db 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -588,7 +588,7 @@ static void __init htab_initialize(void)
 	unsigned long pteg_count;
 	unsigned long prot;
 	unsigned long base = 0, size = 0, limit;
-	int i;
+	struct memblock_region *reg;
 
 	DBG(" -> htab_initialize()\n");
 
@@ -625,7 +625,7 @@ static void __init htab_initialize(void)
 		if (machine_is(cell))
 			limit = 0x80000000;
 		else
-			limit = 0;
+			limit = MEMBLOCK_ALLOC_ANYWHERE;
 
 		table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);
 
@@ -649,7 +649,7 @@ static void __init htab_initialize(void)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
-						    1, memblock.rmo_size));
+						    1, ppc64_rma_size));
 	memset(linear_map_hash_slots, 0, linear_map_hash_count);
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
@@ -659,9 +659,9 @@ static void __init htab_initialize(void)
 	 */
 
 	/* create bolted the linear mapping in the hash table */
-	for (i=0; i < memblock.memory.cnt; i++) {
-		base = (unsigned long)__va(memblock.memory.region[i].base);
-		size = memblock.memory.region[i].size;
+	for_each_memblock(memory, reg) {
+		base = (unsigned long)__va(reg->base);
+		size = reg->size;
 
 		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
 		    base, size, prot);
@@ -696,7 +696,8 @@ static void __init htab_initialize(void)
 #endif /* CONFIG_U3_DART */
 		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
 				prot, mmu_linear_psize, mmu_kernel_ssize));
 	}
+	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 
 	/*
 	 * If we have a memory_limit and we've allocated TCEs then we need to
@@ -1247,3 +1248,23 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	local_irq_restore(flags);
 }
 #endif /* CONFIG_DEBUG_PAGEALLOC */
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* On LPAR systems, the first entry is our RMA region,
+	 * non-LPAR 64-bit hash MMU systems don't have a limitation
+	 * on real mode access, but using the first entry works well
+	 * enough. We also clamp it to 1G to avoid some funky things
+	 * such as RTAS bugs etc...
+	 */
+	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+
+	/* Finally limit subsequent allocations */
+	memblock_set_current_limit(ppc64_rma_size);
+}
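The htab_initialize() hunks replace manual indexing over memblock.memory.cnt with the for_each_memblock() iterator. The sketch below mimics that iteration style in a self-contained program; the struct layout and macro are simplified stand-ins, not the kernel definitions:

    /* Mock of the for_each_memblock()-style loop the patch switches to. */
    #include <stdint.h>
    #include <stdio.h>

    struct mock_region {
        uint64_t base;
        uint64_t size;
    };

    struct mock_type {
        unsigned long cnt;
        struct mock_region *regions;
    };

    #define mock_for_each_region(reg, type) \
        for (reg = (type)->regions; reg < (type)->regions + (type)->cnt; reg++)

    int main(void)
    {
        struct mock_region mem[] = {
            { 0x00000000, 0x10000000 },
            { 0x20000000, 0x10000000 },
        };
        struct mock_type memory = { 2, mem };
        struct mock_region *reg;

        mock_for_each_region(reg, &memory)
            printf("region %llx..%llx\n",
                   (unsigned long long)reg->base,
                   (unsigned long long)(reg->base + reg->size));
        return 0;
    }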
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 6a6975dc2654..742da43b4ab6 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -92,12 +92,6 @@ int __allow_ioremap_reserved;
 unsigned long __max_low_memory = MAX_LOW_MEM;
 
 /*
- * address of the limit of what is accessible with initial MMU setup -
- * 256MB usually, but only 16MB on 601.
- */
-phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000;
-
-/*
  * Check for command-line options that affect what MMU_init will do.
  */
 void MMU_setup(void)
@@ -126,13 +120,6 @@ void __init MMU_init(void)
 	if (ppc_md.progress)
 		ppc_md.progress("MMU:enter", 0x111);
 
-	/* 601 can only access 16MB at the moment */
-	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
-		__initial_memory_limit_addr = 0x01000000;
-	/* 8xx can only access 8MB at the moment */
-	if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
-		__initial_memory_limit_addr = 0x00800000;
-
 	/* parse args from command line */
 	MMU_setup();
 
@@ -190,20 +177,18 @@ void __init MMU_init(void)
 #ifdef CONFIG_BOOTX_TEXT
 	btext_unmap();
 #endif
+
+	/* Shortly after that, the entire linear mapping will be available */
+	memblock_set_current_limit(lowmem_end_addr);
 }
 
 /* This is only called until mem_init is done. */
 void __init *early_get_page(void)
 {
-	void *p;
-
-	if (init_bootmem_done) {
-		p = alloc_bootmem_pages(PAGE_SIZE);
-	} else {
-		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
-					__initial_memory_limit_addr));
-	}
-	return p;
+	if (init_bootmem_done)
+		return alloc_bootmem_pages(PAGE_SIZE);
+	else
+		return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
 }
 
 /* Free up now-unused memory */
@@ -252,3 +237,17 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
+
+#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 8xx can only access 8MB at the moment */
+	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
+}
+#endif /* CONFIG_8xx */
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index ace85fa74b29..6374b2196a17 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -330,3 +330,4 @@ int __meminit vmemmap_populate(struct page *start_page,
 	return 0;
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 1a84a8d00005..a66499650909 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -82,18 +82,11 @@ int page_is_ram(unsigned long pfn)
 	return pfn < max_pfn;
 #else
 	unsigned long paddr = (pfn << PAGE_SHIFT);
-	int i;
-	for (i=0; i < memblock.memory.cnt; i++) {
-		unsigned long base;
+	struct memblock_region *reg;
 
-		base = memblock.memory.region[i].base;
-
-		if ((paddr >= base) &&
-			(paddr < (base + memblock.memory.region[i].size))) {
+	for_each_memblock(memory, reg)
+		if (paddr >= reg->base && paddr < (reg->base + reg->size))
 			return 1;
-		}
-	}
-
 	return 0;
 #endif
 }
@@ -149,23 +142,19 @@ int
 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 		void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
-	struct memblock_property res;
-	unsigned long pfn, len;
-	u64 end;
+	struct memblock_region *reg;
+	unsigned long end_pfn = start_pfn + nr_pages;
+	unsigned long tstart, tend;
 	int ret = -1;
 
-	res.base = (u64) start_pfn << PAGE_SHIFT;
-	res.size = (u64) nr_pages << PAGE_SHIFT;
-
-	end = res.base + res.size - 1;
-	while ((res.base < end) && (memblock_find(&res) >= 0)) {
-		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
-		len = (unsigned long)(res.size >> PAGE_SHIFT);
-		ret = (*func)(pfn, len, arg);
+	for_each_memblock(memory, reg) {
+		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
+		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
+		if (tstart >= tend)
+			continue;
+		ret = (*func)(tstart, tend - tstart, arg);
 		if (ret)
 			break;
-		res.base += (res.size + 1);
-		res.size = (end - res.base + 1);
 	}
 	return ret;
 }
@@ -179,9 +168,9 @@ EXPORT_SYMBOL_GPL(walk_system_ram_range);
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 void __init do_init_bootmem(void)
 {
-	unsigned long i;
 	unsigned long start, bootmap_pages;
 	unsigned long total_pages;
+	struct memblock_region *reg;
 	int boot_mapsize;
 
 	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
@@ -204,10 +193,10 @@ void __init do_init_bootmem(void)
 	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
 
 	/* Add active regions with valid PFNs */
-	for (i = 0; i < memblock.memory.cnt; i++) {
+	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
-		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
 		add_active_range(0, start_pfn, end_pfn);
 	}
 
@@ -218,29 +207,21 @@ void __init do_init_bootmem(void)
 	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		unsigned long addr = memblock.reserved.region[i].base +
-				     memblock_size_bytes(&memblock.reserved, i) - 1;
-		if (addr < lowmem_end_addr)
-			reserve_bootmem(memblock.reserved.region[i].base,
-					memblock_size_bytes(&memblock.reserved, i),
-					BOOTMEM_DEFAULT);
-		else if (memblock.reserved.region[i].base < lowmem_end_addr) {
-			unsigned long adjusted_size = lowmem_end_addr -
-				      memblock.reserved.region[i].base;
-			reserve_bootmem(memblock.reserved.region[i].base,
-					adjusted_size, BOOTMEM_DEFAULT);
+	for_each_memblock(reserved, reg) {
+		unsigned long top = reg->base + reg->size - 1;
+		if (top < lowmem_end_addr)
+			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+		else if (reg->base < lowmem_end_addr) {
+			unsigned long trunc_size = lowmem_end_addr - reg->base;
+			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
 		}
 	}
 #else
 	free_bootmem_with_active_regions(0, max_pfn);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < memblock.reserved.cnt; i++)
-		reserve_bootmem(memblock.reserved.region[i].base,
-				memblock_size_bytes(&memblock.reserved, i),
-				BOOTMEM_DEFAULT);
-
+	for_each_memblock(reserved, reg)
+		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
 #endif
 	/* XXX need to clip this if using highmem? */
 	sparse_memory_present_with_active_regions(0);
@@ -251,22 +232,15 @@ void __init do_init_bootmem(void)
 /* mark pages that don't exist as nosave */
 static int __init mark_nonram_nosave(void)
 {
-	unsigned long memblock_next_region_start_pfn,
-		      memblock_region_max_pfn;
-	int i;
-
-	for (i = 0; i < memblock.memory.cnt - 1; i++) {
-		memblock_region_max_pfn =
-			(memblock.memory.region[i].base >> PAGE_SHIFT) +
-			(memblock.memory.region[i].size >> PAGE_SHIFT);
-		memblock_next_region_start_pfn =
-			memblock.memory.region[i+1].base >> PAGE_SHIFT;
-
-		if (memblock_region_max_pfn < memblock_next_region_start_pfn)
-			register_nosave_region(memblock_region_max_pfn,
-					       memblock_next_region_start_pfn);
+	struct memblock_region *reg, *prev = NULL;
+
+	for_each_memblock(memory, reg) {
+		if (prev &&
+		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
+			register_nosave_region(memblock_region_memory_end_pfn(prev),
+					       memblock_region_memory_base_pfn(reg));
+		prev = reg;
 	}
-
 	return 0;
 }
 
@@ -327,7 +301,7 @@ void __init mem_init(void)
 	swiotlb_init(1);
 #endif
 
-	num_physpages = memblock.memory.size >> PAGE_SHIFT;
+	num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
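The new walk_system_ram_range() clips each memory region against the requested pfn window and skips empty intersections, rather than repeatedly querying the old memblock_find() interface. A standalone sketch of that interval clipping (hypothetical harness, not kernel code):

    /* Mock of the region-vs-window intersection used above. */
    #include <stdio.h>

    struct region { unsigned long base_pfn, end_pfn; };

    static void walk(const struct region *regs, int n,
                     unsigned long start_pfn, unsigned long nr_pages)
    {
        unsigned long end_pfn = start_pfn + nr_pages;
        int i;

        for (i = 0; i < n; i++) {
            unsigned long tstart = regs[i].base_pfn > start_pfn ?
                                   regs[i].base_pfn : start_pfn;
            unsigned long tend = regs[i].end_pfn < end_pfn ?
                                 regs[i].end_pfn : end_pfn;
            if (tstart >= tend)
                continue;  /* no overlap with this region */
            printf("callback(%lu, %lu)\n", tstart, tend - tstart);
        }
    }

    int main(void)
    {
        struct region mem[] = { { 0, 100 }, { 200, 300 } };

        walk(mem, 2, 50, 200);  /* clips to [50,100) and [200,250) */
        return 0;
    }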
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 002878ccf90b..74505b245374 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -802,16 +802,17 @@ static void __init setup_nonnuma(void)
 	unsigned long top_of_ram = memblock_end_of_DRAM();
 	unsigned long total_ram = memblock_phys_mem_size();
 	unsigned long start_pfn, end_pfn;
-	unsigned int i, nid = 0;
+	unsigned int nid = 0;
+	struct memblock_region *reg;
 
 	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 	       top_of_ram, total_ram);
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 
-	for (i = 0; i < memblock.memory.cnt; ++i) {
-		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+	for_each_memblock(memory, reg) {
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
 
 		fake_numa_create_new_node(end_pfn, &nid);
 		add_active_range(nid, start_pfn, end_pfn);
@@ -947,11 +948,11 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = {
 static void mark_reserved_regions_for_nid(int nid)
 {
 	struct pglist_data *node = NODE_DATA(nid);
-	int i;
+	struct memblock_region *reg;
 
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		unsigned long physbase = memblock.reserved.region[i].base;
-		unsigned long size = memblock.reserved.region[i].size;
+	for_each_memblock(reserved, reg) {
+		unsigned long physbase = reg->base;
+		unsigned long size = reg->size;
 		unsigned long start_pfn = physbase >> PAGE_SHIFT;
 		unsigned long end_pfn = PFN_UP(physbase + size);
 		struct node_active_region node_ar;
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index f8a01829d64f..11571e118831 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -223,8 +223,7 @@ void __init MMU_init_hw(void)
 	 * Find some memory for the hash table.
 	 */
 	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
-	Hash = __va(memblock_alloc_base(Hash_size, Hash_size,
-				   __initial_memory_limit_addr));
+	Hash = __va(memblock_alloc(Hash_size, Hash_size));
 	cacheable_memzero(Hash, Hash_size);
 	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
 
@@ -272,3 +271,18 @@ void __init MMU_init_hw(void)
 
 	if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
 }
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 601 can only access 16MB at the moment */
+	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
+		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000));
+	else /* Anything else has 256M mapped */
+		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000));
+}
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index fe391e942521..6a0f20c25469 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -509,6 +509,8 @@ static void __early_init_mmu(int boot_cpu)
 	 * the MMU configuration
 	 */
 	mb();
+
+	memblock_set_current_limit(linear_map_top);
 }
 
 void __init early_init_mmu(void)
@@ -521,4 +523,18 @@ void __cpuinit early_init_mmu_secondary(void)
 	__early_init_mmu(0);
 }
 
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* On Embedded 64-bit, we adjust the RMA size to match
+	 * the bolted TLB entry. We know for now that only 1G
+	 * entries are supported though that may eventually
+	 * change. We crop it to the size of the first MEMBLOCK to
+	 * avoid going over total available memory just in case...
+	 */
+	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+
+	/* Finally limit subsequent allocations */
+	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
+}
 #endif /* CONFIG_PPC64 */
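The two 64-bit variants form the final limit differently: the hash version forces the first region to start at 0 and passes ppc64_rma_size itself, while the embedded/nohash version offsets the limit by the first region's base. A small sketch contrasting the two under the 1G clamp both use (illustrative values and harness only):

    /* Mock contrast of the hash vs. nohash limit calculations above. */
    #include <assert.h>
    #include <stdint.h>

    #define RMA_CAP 0x40000000ull  /* both variants clamp the RMA to 1G */

    static uint64_t rma_size(uint64_t first_size)
    {
        return first_size < RMA_CAP ? first_size : RMA_CAP;
    }

    int main(void)
    {
        /* hash MMU: base must be 0, so limit == RMA size */
        assert(0 + rma_size(0x80000000ull) == 0x40000000ull);
        /* embedded/nohash: limit follows the first region's base */
        assert(0x10000000ull + rma_size(0x20000000ull) == 0x30000000ull);
        return 0;
    }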
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 5cdcc7c8d973..649473a729b8 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -65,7 +65,7 @@ static int __init page_aligned(unsigned long x)
 
 void __init wii_memory_fixups(void)
 {
-	struct memblock_property *p = memblock.memory.region;
+	struct memblock_region *p = memblock.memory.regions;
 
 	/*
 	 * This is part of a workaround to allow the use of two
