about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorDavid Gibson <david@gibson.dropbear.id.au>2008-10-12 13:54:24 -0400
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2008-10-13 19:35:27 -0400
commitf5ea64dcbad89875d130596df14c9b25d994a737 (patch)
tree03bde08104ea5c29edcaafc11576eb5594a7426c /arch
parentcd301c7ba4bbb5a0ee6ebf13eb4a304f29b13847 (diff)
powerpc: Get USE_STRICT_MM_TYPECHECKS working again
The typesafe version of the powerpc pagetable handling (with USE_STRICT_MM_TYPECHECKS defined) has bitrotted again. This patch makes a bunch of small fixes to get it back to building status. It's still not enabled by default as gcc still generates worse code with it for some reason.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch')
-rw-r--r--arch/powerpc/include/asm/mman.h2
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc32.h17
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h12
-rw-r--r--arch/powerpc/mm/gup.c7
-rw-r--r--arch/powerpc/mm/hash_utils_64.c4
-rw-r--r--arch/powerpc/mm/init_64.c4
6 files changed, 24 insertions, 22 deletions
diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
index 9209f755763e..e7b99bac9f48 100644
--- a/arch/powerpc/include/asm/mman.h
+++ b/arch/powerpc/include/asm/mman.h
@@ -44,7 +44,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
44 44
45static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags) 45static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
46{ 46{
47 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : 0; 47 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
48} 48}
49#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags) 49#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
50 50
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 29c83d85b04f..6ab7c67cb5ab 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -431,11 +431,11 @@ extern int icache_44x_need_flush;
431#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) 431#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
432 432
433 433
434#define PAGE_PROT_BITS __pgprot(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \ 434#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
435 _PAGE_WRITETHRU | _PAGE_ENDIAN | \ 435 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
436 _PAGE_USER | _PAGE_ACCESSED | \ 436 _PAGE_USER | _PAGE_ACCESSED | \
437 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \ 437 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
438 _PAGE_EXEC | _PAGE_HWEXEC) 438 _PAGE_EXEC | _PAGE_HWEXEC)
439/* 439/*
440 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware 440 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
441 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need 441 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
@@ -570,9 +570,9 @@ static inline pte_t pte_mkyoung(pte_t pte) {
570 pte_val(pte) |= _PAGE_ACCESSED; return pte; } 570 pte_val(pte) |= _PAGE_ACCESSED; return pte; }
571static inline pte_t pte_mkspecial(pte_t pte) { 571static inline pte_t pte_mkspecial(pte_t pte) {
572 pte_val(pte) |= _PAGE_SPECIAL; return pte; } 572 pte_val(pte) |= _PAGE_SPECIAL; return pte; }
573static inline unsigned long pte_pgprot(pte_t pte) 573static inline pgprot_t pte_pgprot(pte_t pte)
574{ 574{
575 return __pgprot(pte_val(pte)) & PAGE_PROT_BITS; 575 return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
576} 576}
577 577
578static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 578static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
@@ -688,7 +688,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
688 : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) 688 : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
689 : "r" (pte) : "memory"); 689 : "r" (pte) : "memory");
690#else 690#else
691 *ptep = (*ptep & _PAGE_HASHPTE) | (pte & ~_PAGE_HASHPTE); 691 *ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
692 | (pte_val(pte) & ~_PAGE_HASHPTE));
692#endif 693#endif
693} 694}
694 695
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 4597c491e9b5..4c0a8c62859d 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -117,10 +117,10 @@
117#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE) 117#define PAGE_AGP __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
118#define HAVE_PAGE_AGP 118#define HAVE_PAGE_AGP
119 119
120#define PAGE_PROT_BITS __pgprot(_PAGE_GUARDED | _PAGE_COHERENT | \ 120#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | \
121 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \ 121 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
122 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \ 122 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
123 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC) 123 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
124/* PTEIDX nibble */ 124/* PTEIDX nibble */
125#define _PTEIDX_SECONDARY 0x8 125#define _PTEIDX_SECONDARY 0x8
126#define _PTEIDX_GROUP_IX 0x7 126#define _PTEIDX_GROUP_IX 0x7
@@ -264,9 +264,9 @@ static inline pte_t pte_mkhuge(pte_t pte) {
264 return pte; } 264 return pte; }
265static inline pte_t pte_mkspecial(pte_t pte) { 265static inline pte_t pte_mkspecial(pte_t pte) {
266 pte_val(pte) |= _PAGE_SPECIAL; return pte; } 266 pte_val(pte) |= _PAGE_SPECIAL; return pte; }
267static inline unsigned long pte_pgprot(pte_t pte) 267static inline pgprot_t pte_pgprot(pte_t pte)
268{ 268{
269 return __pgprot(pte_val(pte)) & PAGE_PROT_BITS; 269 return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
270} 270}
271 271
272/* Atomic PTE updates */ 272/* Atomic PTE updates */
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
index 9fdf4d6335e4..28a114db3ba0 100644
--- a/arch/powerpc/mm/gup.c
+++ b/arch/powerpc/mm/gup.c
@@ -41,7 +41,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
41 page = pte_page(pte); 41 page = pte_page(pte);
42 if (!page_cache_get_speculative(page)) 42 if (!page_cache_get_speculative(page))
43 return 0; 43 return 0;
44 if (unlikely(pte != *ptep)) { 44 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
45 put_page(page); 45 put_page(page);
46 return 0; 46 return 0;
47 } 47 }
@@ -92,7 +92,7 @@ static noinline int gup_huge_pte(pte_t *ptep, struct hstate *hstate,
92 *nr -= refs; 92 *nr -= refs;
93 return 0; 93 return 0;
94 } 94 }
95 if (unlikely(pte != *ptep)) { 95 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
96 /* Could be optimized better */ 96 /* Could be optimized better */
97 while (*nr) { 97 while (*nr) {
98 put_page(page); 98 put_page(page);
@@ -237,7 +237,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
237 pgd_t pgd = *pgdp; 237 pgd_t pgd = *pgdp;
238 238
239 VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift); 239 VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
240 pr_debug(" %016lx: normal pgd %p\n", addr, (void *)pgd); 240 pr_debug(" %016lx: normal pgd %p\n", addr,
241 (void *)pgd_val(pgd));
241 next = pgd_addr_end(addr, end); 242 next = pgd_addr_end(addr, end);
242 if (pgd_none(pgd)) 243 if (pgd_none(pgd))
243 goto slow; 244 goto slow;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 09db4efe1921..5c64af174752 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -541,7 +541,7 @@ static unsigned long __init htab_get_table_size(void)
541void create_section_mapping(unsigned long start, unsigned long end) 541void create_section_mapping(unsigned long start, unsigned long end)
542{ 542{
543 BUG_ON(htab_bolt_mapping(start, end, __pa(start), 543 BUG_ON(htab_bolt_mapping(start, end, __pa(start),
544 PAGE_KERNEL, mmu_linear_psize, 544 pgprot_val(PAGE_KERNEL), mmu_linear_psize,
545 mmu_kernel_ssize)); 545 mmu_kernel_ssize));
546} 546}
547 547
@@ -649,7 +649,7 @@ void __init htab_initialize(void)
649 mtspr(SPRN_SDR1, _SDR1); 649 mtspr(SPRN_SDR1, _SDR1);
650 } 650 }
651 651
652 prot = PAGE_KERNEL; 652 prot = pgprot_val(PAGE_KERNEL);
653 653
654#ifdef CONFIG_DEBUG_PAGEALLOC 654#ifdef CONFIG_DEBUG_PAGEALLOC
655 linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT; 655 linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 036fe2f10c77..3e6a6543f53a 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -228,8 +228,8 @@ int __meminit vmemmap_populate(struct page *start_page,
228 start, p, __pa(p)); 228 start, p, __pa(p));
229 229
230 mapped = htab_bolt_mapping(start, start + page_size, __pa(p), 230 mapped = htab_bolt_mapping(start, start + page_size, __pa(p),
231 PAGE_KERNEL, mmu_vmemmap_psize, 231 pgprot_val(PAGE_KERNEL),
232 mmu_kernel_ssize); 232 mmu_vmemmap_psize, mmu_kernel_ssize);
233 BUG_ON(mapped < 0); 233 BUG_ON(mapped < 0);
234 } 234 }
235 235