aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMichael Ellerman <mpe@ellerman.id.au>2015-03-25 05:11:57 -0400
committerMichael Ellerman <mpe@ellerman.id.au>2015-04-10 06:02:47 -0400
commit4f9c53c8cc76ed84e3bb0cca8c4ffa2b170d0239 (patch)
tree9592262236d4f0a520cb98e120bff69d4eb36e3d
parent5dd4e4f6fe9495f02d4594bd460b84008a3e8e93 (diff)
powerpc: Fix compile errors with STRICT_MM_TYPECHECKS enabled
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
[mpe: Fix the 32-bit code also]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h12
-rw-r--r--arch/powerpc/mm/dma-noncoherent.c2
-rw-r--r--arch/powerpc/mm/fsl_booke_mmu.c2
-rw-r--r--arch/powerpc/mm/hugepage-hash64.c2
-rw-r--r--arch/powerpc/mm/hugetlbpage.c4
-rw-r--r--arch/powerpc/mm/pgtable_32.c4
-rw-r--r--arch/powerpc/mm/pgtable_64.c2
-rw-r--r--arch/powerpc/mm/tlb_hash64.c2
8 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 2a244bf869c0..14619a59ec09 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -290,11 +290,11 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
290 pte_t old_pte, new_pte = __pte(0); 290 pte_t old_pte, new_pte = __pte(0);
291 291
292 while (1) { 292 while (1) {
293 old_pte = pte_val(*ptep); 293 old_pte = *ptep;
294 /* 294 /*
295 * wait until _PAGE_BUSY is clear then set it atomically 295 * wait until _PAGE_BUSY is clear then set it atomically
296 */ 296 */
297 if (unlikely(old_pte & _PAGE_BUSY)) { 297 if (unlikely(pte_val(old_pte) & _PAGE_BUSY)) {
298 cpu_relax(); 298 cpu_relax();
299 continue; 299 continue;
300 } 300 }
@@ -305,16 +305,18 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
305 return __pte(0); 305 return __pte(0);
306#endif 306#endif
307 /* If pte is not present return None */ 307 /* If pte is not present return None */
308 if (unlikely(!(old_pte & _PAGE_PRESENT))) 308 if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
309 return __pte(0); 309 return __pte(0);
310 310
311 new_pte = pte_mkyoung(old_pte); 311 new_pte = pte_mkyoung(old_pte);
312 if (writing && pte_write(old_pte)) 312 if (writing && pte_write(old_pte))
313 new_pte = pte_mkdirty(new_pte); 313 new_pte = pte_mkdirty(new_pte);
314 314
315 if (old_pte == __cmpxchg_u64((unsigned long *)ptep, old_pte, 315 if (pte_val(old_pte) == __cmpxchg_u64((unsigned long *)ptep,
316 new_pte)) 316 pte_val(old_pte),
317 pte_val(new_pte))) {
317 break; 318 break;
319 }
318 } 320 }
319 return new_pte; 321 return new_pte;
320} 322}
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index d85e86aac7fb..169aba446a74 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -228,7 +228,7 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t
228 do { 228 do {
229 SetPageReserved(page); 229 SetPageReserved(page);
230 map_page(vaddr, page_to_phys(page), 230 map_page(vaddr, page_to_phys(page),
231 pgprot_noncached(PAGE_KERNEL)); 231 pgprot_val(pgprot_noncached(PAGE_KERNEL)));
232 page++; 232 page++;
233 vaddr += PAGE_SIZE; 233 vaddr += PAGE_SIZE;
234 } while (size -= PAGE_SIZE); 234 } while (size -= PAGE_SIZE);
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index b46912fee7cd..9c90e66cffb6 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -181,7 +181,7 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
181 unsigned long cam_sz; 181 unsigned long cam_sz;
182 182
183 cam_sz = calc_cam_sz(ram, virt, phys); 183 cam_sz = calc_cam_sz(ram, virt, phys);
184 settlbcam(i, virt, phys, cam_sz, PAGE_KERNEL_X, 0); 184 settlbcam(i, virt, phys, cam_sz, pgprot_val(PAGE_KERNEL_X), 0);
185 185
186 ram -= cam_sz; 186 ram -= cam_sz;
187 amount_mapped += cam_sz; 187 amount_mapped += cam_sz;
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 86686514ae13..43dafb9d6a46 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -33,7 +33,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
33 * atomically mark the linux large page PMD busy and dirty 33 * atomically mark the linux large page PMD busy and dirty
34 */ 34 */
35 do { 35 do {
36 pmd_t pmd = ACCESS_ONCE(*pmdp); 36 pmd_t pmd = READ_ONCE(*pmdp);
37 37
38 old_pmd = pmd_val(pmd); 38 old_pmd = pmd_val(pmd);
39 /* If PMD busy, retry the access */ 39 /* If PMD busy, retry the access */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7e408bfc7948..fa9d5c238d22 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -964,7 +964,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
964 *shift = 0; 964 *shift = 0;
965 965
966 pgdp = pgdir + pgd_index(ea); 966 pgdp = pgdir + pgd_index(ea);
967 pgd = ACCESS_ONCE(*pgdp); 967 pgd = READ_ONCE(*pgdp);
968 /* 968 /*
969 * Always operate on the local stack value. This make sure the 969 * Always operate on the local stack value. This make sure the
970 * value don't get updated by a parallel THP split/collapse, 970 * value don't get updated by a parallel THP split/collapse,
@@ -1045,7 +1045,7 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
1045 if (pte_end < end) 1045 if (pte_end < end)
1046 end = pte_end; 1046 end = pte_end;
1047 1047
1048 pte = ACCESS_ONCE(*ptep); 1048 pte = READ_ONCE(*ptep);
1049 mask = _PAGE_PRESENT | _PAGE_USER; 1049 mask = _PAGE_PRESENT | _PAGE_USER;
1050 if (write) 1050 if (write)
1051 mask |= _PAGE_RW; 1051 mask |= _PAGE_RW;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 1bc1762f358d..70b4752af54f 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -189,7 +189,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
189 189
190 /* Make sure we have the base flags */ 190 /* Make sure we have the base flags */
191 if ((flags & _PAGE_PRESENT) == 0) 191 if ((flags & _PAGE_PRESENT) == 0)
192 flags |= PAGE_KERNEL; 192 flags |= pgprot_val(PAGE_KERNEL);
193 193
194 /* Non-cacheable page cannot be coherent */ 194 /* Non-cacheable page cannot be coherent */
195 if (flags & _PAGE_NO_CACHE) 195 if (flags & _PAGE_NO_CACHE)
@@ -324,7 +324,7 @@ void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
324 p = memstart_addr + s; 324 p = memstart_addr + s;
325 for (; s < top; s += PAGE_SIZE) { 325 for (; s < top; s += PAGE_SIZE) {
326 ktext = ((char *) v >= _stext && (char *) v < etext); 326 ktext = ((char *) v >= _stext && (char *) v < etext);
327 f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL; 327 f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
328 map_page(v, p, f); 328 map_page(v, p, f);
329#ifdef CONFIG_PPC_STD_MMU_32 329#ifdef CONFIG_PPC_STD_MMU_32
330 if (ktext) 330 if (ktext)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 6957cc1ca0a7..3ac3a0a1edfb 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -723,7 +723,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
723 assert_spin_locked(&mm->page_table_lock); 723 assert_spin_locked(&mm->page_table_lock);
724 WARN_ON(!pmd_trans_huge(pmd)); 724 WARN_ON(!pmd_trans_huge(pmd));
725#endif 725#endif
726 trace_hugepage_set_pmd(addr, pmd); 726 trace_hugepage_set_pmd(addr, pmd_val(pmd));
727 return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); 727 return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
728} 728}
729 729
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index d2a94b85dbc2..c522969f012d 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -216,7 +216,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
216 continue; 216 continue;
217 pte = pte_val(*ptep); 217 pte = pte_val(*ptep);
218 if (hugepage_shift) 218 if (hugepage_shift)
219 trace_hugepage_invalidate(start, pte_val(pte)); 219 trace_hugepage_invalidate(start, pte);
220 if (!(pte & _PAGE_HASHPTE)) 220 if (!(pte & _PAGE_HASHPTE))
221 continue; 221 continue;
222 if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte))) 222 if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))