Diffstat (limited to 'arch/powerpc/mm/mem.c')
-rw-r--r--	arch/powerpc/mm/mem.c	51
1 file changed, 19 insertions, 32 deletions
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 0a64fffabee1..20394e52fe27 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -246,35 +246,19 @@ static int __init mark_nonram_nosave(void)
 }
 #endif
 
-static bool zone_limits_final;
-
 /*
- * The memory zones past TOP_ZONE are managed by generic mm code.
- * These should be set to zero since that's what every other
- * architecture does.
+ * Zones usage:
+ *
+ * We setup ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
+ * everything else. GFP_DMA32 page allocations automatically fall back to
+ * ZONE_DMA.
+ *
+ * By using 31-bit unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
+ * inform the generic DMA mapping code. 32-bit only devices (if not handled
+ * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
+ * otherwise served by ZONE_DMA.
  */
-static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
-	[0            ... TOP_ZONE        ] = ~0UL,
-	[TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
-};
-
-/*
- * Restrict the specified zone and all more restrictive zones
- * to be below the specified pfn. May not be called after
- * paging_init().
- */
-void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
-{
-	int i;
-
-	if (WARN_ON(zone_limits_final))
-		return;
-
-	for (i = zone; i >= 0; i--) {
-		if (max_zone_pfns[i] > pfn_limit)
-			max_zone_pfns[i] = pfn_limit;
-	}
-}
+static unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 /*
  * Find the least restrictive zone that is entirely below the
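For a rough sense of what the 31-bit ZONE_DMA boundary described in the new comment works out to in page frame numbers, here is a small stand-alone sketch. It is not kernel code; the 4K and 64K PAGE_SHIFT values are illustrative assumptions, not something taken from this patch.

	#include <stdio.h>

	int main(void)
	{
		unsigned long dma_limit = 0x7fffffffUL;	/* 31-bit boundary: 2 GiB - 1 */

		/* With 4K pages (PAGE_SHIFT == 12), ZONE_DMA tops out at pfn 0x7ffff. */
		printf("4K pages:  ZONE_DMA limit pfn = %#lx\n", dma_limit >> 12);

		/* With 64K pages (PAGE_SHIFT == 16), ZONE_DMA tops out at pfn 0x7fff. */
		printf("64K pages: ZONE_DMA limit pfn = %#lx\n", dma_limit >> 16);

		return 0;
	}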
@@ -324,11 +308,14 @@ void __init paging_init(void)
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (long int)((top_of_ram - total_ram) >> 20));
 
+#ifdef CONFIG_ZONE_DMA
+	max_zone_pfns[ZONE_DMA] = min(max_low_pfn, 0x7fffffffUL >> PAGE_SHIFT);
+#endif
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
-	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
+	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
 #endif
-	limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
-	zone_limits_final = true;
+
 	free_area_init_nodes(max_zone_pfns);
 
 	mark_nonram_nosave();
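The hunk above replaces the limit_zone_pfn() calls with direct assignments to max_zone_pfns[] before free_area_init_nodes(). A minimal user-space sketch of that sizing logic follows, assuming 4K pages and a made-up amount of RAM; the min_ul() helper and the pfn values are hypothetical and only stand in for the kernel's min() and memblock-derived limits.

	#include <stdio.h>

	#define PAGE_SHIFT	12		/* assume 4K pages for the example */
	enum zone_type { ZONE_DMA, ZONE_NORMAL, ZONE_HIGHMEM, MAX_NR_ZONES };

	static unsigned long max_zone_pfns[MAX_NR_ZONES];

	static unsigned long min_ul(unsigned long a, unsigned long b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		unsigned long max_low_pfn = 0x400000;	/* pretend 16 GiB of RAM */
		unsigned long max_pfn = max_low_pfn;	/* assume no highmem here */

		/* ZONE_DMA: everything below the 31-bit boundary, capped at RAM size. */
		max_zone_pfns[ZONE_DMA] = min_ul(max_low_pfn, 0x7fffffffUL >> PAGE_SHIFT);
		/* ZONE_NORMAL: everything else. */
		max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
		/* Set unconditionally here; the kernel guards this with CONFIG_HIGHMEM. */
		max_zone_pfns[ZONE_HIGHMEM] = max_pfn;

		printf("ZONE_DMA    up to pfn %#lx\n", max_zone_pfns[ZONE_DMA]);
		printf("ZONE_NORMAL up to pfn %#lx\n", max_zone_pfns[ZONE_NORMAL]);
		return 0;
	}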
@@ -503,7 +490,7 @@ EXPORT_SYMBOL(flush_icache_user_range);
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t *ptep)
 {
-#ifdef CONFIG_PPC_STD_MMU
+#ifdef CONFIG_PPC_BOOK3S
 	/*
 	 * We don't need to worry about _PAGE_PRESENT here because we are
 	 * called with either mm->page_table_lock held or ptl lock held
@@ -541,7 +528,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 	}
 
 	hash_preload(vma->vm_mm, address, is_exec, trap);
-#endif /* CONFIG_PPC_STD_MMU */
+#endif /* CONFIG_PPC_BOOK3S */
 #if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
 	&& defined(CONFIG_HUGETLB_PAGE)
 	if (is_vm_hugetlb_page(vma))