| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-21 21:52:11 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-21 21:52:11 -0400 |
| commit | 3044100e58c84e133791c8b60a2f5bef69d732e4 (patch) | |
| tree | f9ed0d1f3df89c31dd81ccaf0cf3478f57b08440 /arch/powerpc/mm | |
| parent | b5153163ed580e00c67bdfecb02b2e3843817b3e (diff) | |
| parent | 67e87f0a1c5cbc750f81ebf6a128e8ff6f4376cc (diff) | |
Merge branch 'core-memblock-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-memblock-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (74 commits)
x86-64: Only set max_pfn_mapped to 512 MiB if we enter via head_64.S
xen: Cope with unmapped pages when initializing kernel pagetable
memblock, bootmem: Round pfn properly for memory and reserved regions
memblock: Annotate memblock functions with __init_memblock
memblock: Allow memblock_init to be called early
memblock/arm: Fix memblock_region_is_memory() typo
x86, memblock: Remove __memblock_x86_find_in_range_size()
memblock: Fix wraparound in find_region()
x86-32, memblock: Make add_highpages honor early reserved ranges
x86, memblock: Fix crashkernel allocation
arm, memblock: Fix the sparsemem build
memblock: Fix section mismatch warnings
powerpc, memblock: Fix memblock API change fallout
memblock, microblaze: Fix memblock API change fallout
x86: Remove old bootmem code
x86, memblock: Use memblock_memory_size()/memblock_free_memory_size() to get correct dma_reserve
x86: Remove not used early_res code
x86, memblock: Replace e820_/_early string with memblock_
x86: Use memblock to replace early_res
x86, memblock: Use memblock_debug to control debug message print out
...
Fix up trivial conflicts in arch/x86/kernel/setup.c and kernel/Makefile
Diffstat (limited to 'arch/powerpc/mm')
| file | lines changed |
|---|---|
| arch/powerpc/mm/40x_mmu.c | 17 |
| arch/powerpc/mm/44x_mmu.c | 14 |
| arch/powerpc/mm/fsl_booke_mmu.c | 12 |
| arch/powerpc/mm/hash_utils_64.c | 35 |
| arch/powerpc/mm/init_32.c | 43 |
| arch/powerpc/mm/init_64.c | 1 |
| arch/powerpc/mm/mem.c | 94 |
| arch/powerpc/mm/numa.c | 17 |
| arch/powerpc/mm/ppc_mmu_32.c | 18 |
| arch/powerpc/mm/tlb_nohash.c | 16 |

10 files changed, 165 insertions(+), 102 deletions(-)
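The theme running through the powerpc/mm hunks below is largely mechanical: every open-coded loop over `memblock.memory.cnt` / `memblock.memory.region[i]` becomes a walk with the `for_each_memblock()` iterator and the pfn accessor helpers. As an illustration only (the helper name here is hypothetical, not from this merge), the new idiom looks like:

```c
#include <linux/memblock.h>

/* Hypothetical helper showing the iterator idiom this merge adopts:
 * regions are walked via for_each_memblock() instead of indexing
 * memblock's internal arrays, and pfn bounds come from the
 * memblock_region_memory_{base,end}_pfn() accessors.
 */
static unsigned long __init count_memory_pfns(void)
{
	struct memblock_region *reg;
	unsigned long pfns = 0;

	for_each_memblock(memory, reg)
		pfns += memblock_region_memory_end_pfn(reg) -
			memblock_region_memory_base_pfn(reg);
	return pfns;
}
```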
```diff
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 1dc2fa5ce1bd..5810967511d4 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -47,6 +48,7 @@
 #include <asm/bootx.h>
 #include <asm/machdep.h>
 #include <asm/setup.h>
+
 #include "mmu_decl.h"
 
 extern int __map_without_ltlbs;
@@ -139,8 +141,19 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	 * coverage with normal-sized pages (or other reasons) do not
 	 * attempt to allocate outside the allowed range.
 	 */
-
-	__initial_memory_limit_addr = memstart_addr + mapped;
+	memblock_set_current_limit(mapped);
 
 	return mapped;
 }
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 40x can only access 16MB at the moment (see head_40x.S) */
+	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
+}
```
```diff
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index d8c6efb32bc6..024acab588fd 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -24,6 +24,8 @@
  */
 
 #include <linux/init.h>
+#include <linux/memblock.h>
+
 #include <asm/mmu.h>
 #include <asm/system.h>
 #include <asm/page.h>
@@ -213,6 +215,18 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	return total_lowmem;
 }
 
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 44x has a 256M TLB entry pinned at boot */
+	memblock_set_current_limit(min_t(u64, first_memblock_size, PPC_PIN_SIZE));
+}
+
 #ifdef CONFIG_SMP
 void __cpuinit mmu_init_secondary(int cpu)
 {
```
```diff
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 4b66a1ece6d8..cde270847e7c 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -40,6 +40,7 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -213,5 +214,14 @@ void __init adjust_total_lowmem(void)
 	pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
 	        (unsigned int)((total_lowmem - __max_low_memory) >> 20));
 
-	__initial_memory_limit_addr = memstart_addr + __max_low_memory;
+	memblock_set_current_limit(memstart_addr + __max_low_memory);
+}
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	phys_addr_t limit = first_memblock_base + first_memblock_size;
+
+	/* 64M mapped initially according to head_fsl_booke.S */
+	memblock_set_current_limit(min_t(u64, limit, 0x04000000));
 }
```
```diff
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 09dffe6efa46..83f534d862db 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -588,7 +588,7 @@ static void __init htab_initialize(void)
 	unsigned long pteg_count;
 	unsigned long prot;
 	unsigned long base = 0, size = 0, limit;
-	int i;
+	struct memblock_region *reg;
 
 	DBG(" -> htab_initialize()\n");
 
@@ -625,7 +625,7 @@ static void __init htab_initialize(void)
 		if (machine_is(cell))
 			limit = 0x80000000;
 		else
-			limit = 0;
+			limit = MEMBLOCK_ALLOC_ANYWHERE;
 
 		table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit);
 
@@ -649,7 +649,7 @@ static void __init htab_initialize(void)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count,
-						    1, memblock.rmo_size));
+						    1, ppc64_rma_size));
 	memset(linear_map_hash_slots, 0, linear_map_hash_count);
 #endif /* CONFIG_DEBUG_PAGEALLOC */
 
@@ -659,9 +659,9 @@ static void __init htab_initialize(void)
 	 */
 
 	/* create bolted the linear mapping in the hash table */
-	for (i=0; i < memblock.memory.cnt; i++) {
-		base = (unsigned long)__va(memblock.memory.region[i].base);
-		size = memblock.memory.region[i].size;
+	for_each_memblock(memory, reg) {
+		base = (unsigned long)__va(reg->base);
+		size = reg->size;
 
 		DBG("creating mapping for region: %lx..%lx (prot: %lx)\n",
 		    base, size, prot);
@@ -696,7 +696,8 @@ static void __init htab_initialize(void)
 #endif /* CONFIG_U3_DART */
 		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
 				prot, mmu_linear_psize, mmu_kernel_ssize));
 	}
+	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 
 	/*
 	 * If we have a memory_limit and we've allocated TCEs then we need to
@@ -1247,3 +1248,23 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
 	local_irq_restore(flags);
 }
 #endif /* CONFIG_DEBUG_PAGEALLOC */
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* On LPAR systems, the first entry is our RMA region,
+	 * non-LPAR 64-bit hash MMU systems don't have a limitation
+	 * on real mode access, but using the first entry works well
+	 * enough. We also clamp it to 1G to avoid some funky things
+	 * such as RTAS bugs etc...
+	 */
+	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+
+	/* Finally limit subsequent allocations */
+	memblock_set_current_limit(ppc64_rma_size);
+}
```
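Note the two-phase pattern in hash_utils_64.c: `setup_initial_memory_limit()` clamps allocations to the real-mode area very early, and `htab_initialize()` lifts the clamp with `MEMBLOCK_ALLOC_ANYWHERE` once the linear mapping is bolted into the hash table. Condensed into a single illustrative function (not code from this merge):

```c
#include <linux/memblock.h>

/* Illustration only: the two-phase limit handling visible in the
 * hash_utils_64.c hunks above, condensed into one function.
 */
static void __init example_two_phase_limit(phys_addr_t first_memblock_size)
{
	/* Phase 1, early boot: only the RMA is usable in real mode,
	 * clamped to 1G as in setup_initial_memory_limit() above.
	 */
	u64 rma_size = min_t(u64, first_memblock_size, 0x40000000);

	memblock_set_current_limit(rma_size);

	/* ... htab_initialize() bolts the linear mapping here ... */

	/* Phase 2: everything is mapped, allocations may go anywhere */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
```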
```diff
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 6a6975dc2654..742da43b4ab6 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -92,12 +92,6 @@ int __allow_ioremap_reserved;
 unsigned long __max_low_memory = MAX_LOW_MEM;
 
 /*
- * address of the limit of what is accessible with initial MMU setup -
- * 256MB usually, but only 16MB on 601.
- */
-phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000;
-
-/*
  * Check for command-line options that affect what MMU_init will do.
  */
 void MMU_setup(void)
@@ -126,13 +120,6 @@ void __init MMU_init(void)
 	if (ppc_md.progress)
 		ppc_md.progress("MMU:enter", 0x111);
 
-	/* 601 can only access 16MB at the moment */
-	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
-		__initial_memory_limit_addr = 0x01000000;
-	/* 8xx can only access 8MB at the moment */
-	if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
-		__initial_memory_limit_addr = 0x00800000;
-
 	/* parse args from command line */
 	MMU_setup();
 
@@ -190,20 +177,18 @@ void __init MMU_init(void)
 #ifdef CONFIG_BOOTX_TEXT
 	btext_unmap();
 #endif
+
+	/* Shortly after that, the entire linear mapping will be available */
+	memblock_set_current_limit(lowmem_end_addr);
 }
 
 /* This is only called until mem_init is done. */
 void __init *early_get_page(void)
 {
-	void *p;
-
-	if (init_bootmem_done) {
-		p = alloc_bootmem_pages(PAGE_SIZE);
-	} else {
-		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
-					__initial_memory_limit_addr));
-	}
-	return p;
+	if (init_bootmem_done)
+		return alloc_bootmem_pages(PAGE_SIZE);
+	else
+		return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
 }
 
 /* Free up now-unused memory */
@@ -252,3 +237,17 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
+
+#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 8xx can only access 8MB at the moment */
+	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
+}
+#endif /* CONFIG_8xx */
```
```diff
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index ace85fa74b29..6374b2196a17 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -330,3 +330,4 @@ int __meminit vmemmap_populate(struct page *start_page,
 	return 0;
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
```
```diff
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 1a84a8d00005..a66499650909 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -82,18 +82,11 @@ int page_is_ram(unsigned long pfn)
 	return pfn < max_pfn;
 #else
 	unsigned long paddr = (pfn << PAGE_SHIFT);
-	int i;
-	for (i=0; i < memblock.memory.cnt; i++) {
-		unsigned long base;
+	struct memblock_region *reg;
 
-		base = memblock.memory.region[i].base;
-
-		if ((paddr >= base) &&
-			(paddr < (base + memblock.memory.region[i].size))) {
+	for_each_memblock(memory, reg)
+		if (paddr >= reg->base && paddr < (reg->base + reg->size))
 			return 1;
-		}
-	}
-
 	return 0;
 #endif
 }
@@ -149,23 +142,19 @@ int
 walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
 		void *arg, int (*func)(unsigned long, unsigned long, void *))
 {
-	struct memblock_property res;
-	unsigned long pfn, len;
-	u64 end;
+	struct memblock_region *reg;
+	unsigned long end_pfn = start_pfn + nr_pages;
+	unsigned long tstart, tend;
 	int ret = -1;
 
-	res.base = (u64) start_pfn << PAGE_SHIFT;
-	res.size = (u64) nr_pages << PAGE_SHIFT;
-
-	end = res.base + res.size - 1;
-	while ((res.base < end) && (memblock_find(&res) >= 0)) {
-		pfn = (unsigned long)(res.base >> PAGE_SHIFT);
-		len = (unsigned long)(res.size >> PAGE_SHIFT);
-		ret = (*func)(pfn, len, arg);
+	for_each_memblock(memory, reg) {
+		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
+		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
+		if (tstart >= tend)
+			continue;
+		ret = (*func)(tstart, tend - tstart, arg);
 		if (ret)
 			break;
-		res.base += (res.size + 1);
-		res.size = (end - res.base + 1);
 	}
 	return ret;
 }
@@ -179,9 +168,9 @@ EXPORT_SYMBOL_GPL(walk_system_ram_range);
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 void __init do_init_bootmem(void)
 {
-	unsigned long i;
 	unsigned long start, bootmap_pages;
 	unsigned long total_pages;
+	struct memblock_region *reg;
 	int boot_mapsize;
 
 	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
@@ -204,10 +193,10 @@ void __init do_init_bootmem(void)
 	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
 
 	/* Add active regions with valid PFNs */
-	for (i = 0; i < memblock.memory.cnt; i++) {
+	for_each_memblock(memory, reg) {
 		unsigned long start_pfn, end_pfn;
-		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
 		add_active_range(0, start_pfn, end_pfn);
 	}
 
@@ -218,29 +207,21 @@ void __init do_init_bootmem(void)
 	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		unsigned long addr = memblock.reserved.region[i].base +
-				     memblock_size_bytes(&memblock.reserved, i) - 1;
-		if (addr < lowmem_end_addr)
-			reserve_bootmem(memblock.reserved.region[i].base,
-					memblock_size_bytes(&memblock.reserved, i),
-					BOOTMEM_DEFAULT);
-		else if (memblock.reserved.region[i].base < lowmem_end_addr) {
-			unsigned long adjusted_size = lowmem_end_addr -
-				      memblock.reserved.region[i].base;
-			reserve_bootmem(memblock.reserved.region[i].base,
-					adjusted_size, BOOTMEM_DEFAULT);
+	for_each_memblock(reserved, reg) {
+		unsigned long top = reg->base + reg->size - 1;
+		if (top < lowmem_end_addr)
+			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+		else if (reg->base < lowmem_end_addr) {
+			unsigned long trunc_size = lowmem_end_addr - reg->base;
+			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
 		}
 	}
 #else
 	free_bootmem_with_active_regions(0, max_pfn);
 
 	/* reserve the sections we're already using */
-	for (i = 0; i < memblock.reserved.cnt; i++)
-		reserve_bootmem(memblock.reserved.region[i].base,
-				memblock_size_bytes(&memblock.reserved, i),
-				BOOTMEM_DEFAULT);
-
+	for_each_memblock(reserved, reg)
+		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
 #endif
 	/* XXX need to clip this if using highmem? */
 	sparse_memory_present_with_active_regions(0);
@@ -251,22 +232,15 @@ void __init do_init_bootmem(void)
 /* mark pages that don't exist as nosave */
 static int __init mark_nonram_nosave(void)
 {
-	unsigned long memblock_next_region_start_pfn,
-		memblock_region_max_pfn;
-	int i;
-
-	for (i = 0; i < memblock.memory.cnt - 1; i++) {
-		memblock_region_max_pfn =
-			(memblock.memory.region[i].base >> PAGE_SHIFT) +
-			(memblock.memory.region[i].size >> PAGE_SHIFT);
-		memblock_next_region_start_pfn =
-			memblock.memory.region[i+1].base >> PAGE_SHIFT;
-
-		if (memblock_region_max_pfn < memblock_next_region_start_pfn)
-			register_nosave_region(memblock_region_max_pfn,
-					       memblock_next_region_start_pfn);
+	struct memblock_region *reg, *prev = NULL;
+
+	for_each_memblock(memory, reg) {
+		if (prev &&
+		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
+			register_nosave_region(memblock_region_memory_end_pfn(prev),
+					       memblock_region_memory_base_pfn(reg));
+		prev = reg;
 	}
-
 	return 0;
 }
 
@@ -327,7 +301,7 @@ void __init mem_init(void)
 	swiotlb_init(1);
 #endif
 
-	num_physpages = memblock.memory.size >> PAGE_SHIFT;
+	num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
```
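The rewritten `walk_system_ram_range()` above replaces the stateful `memblock_find()` loop with a plain interval intersection: clamp the caller's pfn window against each region and skip empty overlaps. The same logic in isolation (a hypothetical helper, for illustration only):

```c
#include <linux/kernel.h>
#include <linux/memblock.h>

/* Hypothetical helper, not from the diff: returns how many pfns of the
 * window [start_pfn, end_pfn) fall inside @reg, writing the clamped
 * start to *tstart. Zero means the window misses the region entirely.
 */
static unsigned long __init region_window_pfns(struct memblock_region *reg,
					       unsigned long start_pfn,
					       unsigned long end_pfn,
					       unsigned long *tstart)
{
	unsigned long base = memblock_region_memory_base_pfn(reg);
	unsigned long end = memblock_region_memory_end_pfn(reg);

	*tstart = max(start_pfn, base);	/* later of the two starts */
	end = min(end_pfn, end);	/* earlier of the two ends */

	return (*tstart < end) ? end - *tstart : 0;
}
```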
```diff
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 002878ccf90b..74505b245374 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -802,16 +802,17 @@ static void __init setup_nonnuma(void)
 	unsigned long top_of_ram = memblock_end_of_DRAM();
 	unsigned long total_ram = memblock_phys_mem_size();
 	unsigned long start_pfn, end_pfn;
-	unsigned int i, nid = 0;
+	unsigned int nid = 0;
+	struct memblock_region *reg;
 
 	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 	       top_of_ram, total_ram);
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 
-	for (i = 0; i < memblock.memory.cnt; ++i) {
-		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
+	for_each_memblock(memory, reg) {
+		start_pfn = memblock_region_memory_base_pfn(reg);
+		end_pfn = memblock_region_memory_end_pfn(reg);
 
 		fake_numa_create_new_node(end_pfn, &nid);
 		add_active_range(nid, start_pfn, end_pfn);
@@ -947,11 +948,11 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = {
 static void mark_reserved_regions_for_nid(int nid)
 {
 	struct pglist_data *node = NODE_DATA(nid);
-	int i;
+	struct memblock_region *reg;
 
-	for (i = 0; i < memblock.reserved.cnt; i++) {
-		unsigned long physbase = memblock.reserved.region[i].base;
-		unsigned long size = memblock.reserved.region[i].size;
+	for_each_memblock(reserved, reg) {
+		unsigned long physbase = reg->base;
+		unsigned long size = reg->size;
 		unsigned long start_pfn = physbase >> PAGE_SHIFT;
 		unsigned long end_pfn = PFN_UP(physbase + size);
 		struct node_active_region node_ar;
```
```diff
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index f8a01829d64f..11571e118831 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -223,8 +223,7 @@ void __init MMU_init_hw(void)
 	 * Find some memory for the hash table.
 	 */
 	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
-	Hash = __va(memblock_alloc_base(Hash_size, Hash_size,
-				   __initial_memory_limit_addr));
+	Hash = __va(memblock_alloc(Hash_size, Hash_size));
 	cacheable_memzero(Hash, Hash_size);
 	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
 
@@ -272,3 +271,18 @@ void __init MMU_init_hw(void)
 
 	if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205);
 }
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* We don't currently support the first MEMBLOCK not mapping 0
+	 * physical on those processors
+	 */
+	BUG_ON(first_memblock_base != 0);
+
+	/* 601 can only access 16MB at the moment */
+	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
+		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000));
+	else /* Anything else has 256M mapped */
+		memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000));
+}
```
```diff
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index fe391e942521..6a0f20c25469 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -509,6 +509,8 @@ static void __early_init_mmu(int boot_cpu)
 	 * the MMU configuration
 	 */
 	mb();
+
+	memblock_set_current_limit(linear_map_top);
 }
 
 void __init early_init_mmu(void)
@@ -521,4 +523,18 @@ void __cpuinit early_init_mmu_secondary(void)
 	__early_init_mmu(0);
 }
 
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+				phys_addr_t first_memblock_size)
+{
+	/* On Embedded 64-bit, we adjust the RMA size to match
+	 * the bolted TLB entry. We know for now that only 1G
+	 * entries are supported though that may eventually
+	 * change. We crop it to the size of the first MEMBLOCK to
+	 * avoid going over total available memory just in case...
+	 */
+	ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);
+
+	/* Finally limit subsequent allocations */
+	memblock_set_current_limit(ppc64_memblock_base + ppc64_rma_size);
+}
 #endif /* CONFIG_PPC64 */
```
