Diffstat (limited to 'arch/powerpc/mm/pgtable_32.c')
-rw-r--r--	arch/powerpc/mm/pgtable_32.c	23
1 file changed, 12 insertions, 11 deletions
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 573b3bd1c45b..a87ead0138b4 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -26,7 +26,8 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -114,11 +115,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	struct page *ptepage;
 
-#ifdef CONFIG_HIGHPTE
-	gfp_t flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT | __GFP_ZERO;
-#else
 	gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
-#endif
 
 	ptepage = alloc_pages(flags, 0);
 	if (!ptepage)
@@ -145,6 +142,14 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
 	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
 	flags &= ~(_PAGE_USER | _PAGE_EXEC);
 
+#ifdef _PAGE_BAP_SR
+	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
+	 * which means that we just cleared supervisor access... oops ;-) This
+	 * restores it
+	 */
+	flags |= _PAGE_BAP_SR;
+#endif
+
 	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_flags);
@@ -193,7 +198,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
 	 * mem_init() sets high_memory so only do the check after that.
 	 */
 	if (mem_init_done && (p < virt_to_phys(high_memory)) &&
-	    !(__allow_ioremap_reserved && lmb_is_region_reserved(p, size))) {
+	    !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
 		printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n",
 		       (unsigned long long)p, __builtin_return_address(0));
 		return NULL;
@@ -326,7 +331,7 @@ void __init mapin_ram(void)
 		s = mmu_mapin_ram(top);
 		__mapin_ram_chunk(s, top);
 
-		top = lmb_end_of_DRAM();
+		top = memblock_end_of_DRAM();
 		s = wii_mmu_mapin_mem2(top);
 		__mapin_ram_chunk(s, top);
 	}
@@ -384,11 +389,7 @@ static int __change_page_attr(struct page *page, pgprot_t prot)
 		return -EINVAL;
 	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
 	wmb();
-#ifdef CONFIG_PPC_STD_MMU
-	flush_hash_pages(0, address, pmd_val(*kpmd), 1);
-#else
 	flush_tlb_page(NULL, address);
-#endif
 	pte_unmap(kpte);
 
 	return 0;