summaryrefslogtreecommitdiffstats
path: root/arch/powerpc
diff options
context:
space:
mode:
authorMel Gorman <mgorman@suse.de>2015-02-12 17:58:22 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-12 21:54:08 -0500
commit8a0516ed8b90c95ffa1363b420caa37418149f21 (patch)
tree64f95968661a136fa8a246419680420819dc1e0e /arch/powerpc
parente7bb4b6d1609cce391af1e7bc6f31d14f1a3a890 (diff)
mm: convert p[te|md]_numa users to p[te|md]_protnone_numa
Convert existing users of pte_numa and friends to the new helper.  Note
that the kernel is broken after this patch is applied until the other
page table modifiers are also altered.  This patch layout is to make
review easier.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kirill Shutemov <kirill.shutemov@linux.intel.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c |  2 +-
-rw-r--r--  arch/powerpc/mm/fault.c             |  5 -----
-rw-r--r--  arch/powerpc/mm/pgtable.c           | 11 ++++++++---
-rw-r--r--  arch/powerpc/mm/pgtable_64.c        |  3 ++-
4 files changed, 11 insertions(+), 10 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 510bdfbc4073..625407e4d3b0 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -212,7 +212,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	/* Look up the Linux PTE for the backing page */
 	pte_size = psize;
 	pte = lookup_linux_pte_and_update(pgdir, hva, writing, &pte_size);
-	if (pte_present(pte) && !pte_numa(pte)) {
+	if (pte_present(pte) && !pte_protnone(pte)) {
 		if (writing && !pte_write(pte))
 			/* make the actual HPTE be read-only */
 			ptel = hpte_make_readonly(ptel);
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 6154b0a2b063..f38327b95f76 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -398,8 +398,6 @@ good_area:
 	 * processors use the same I/D cache coherency mechanism
 	 * as embedded.
 	 */
-	if (error_code & DSISR_PROTFAULT)
-		goto bad_area;
 #endif /* CONFIG_PPC_STD_MMU */
 
 	/*
@@ -423,9 +421,6 @@ good_area:
 		flags |= FAULT_FLAG_WRITE;
 	/* a read */
 	} else {
-		/* protection fault */
-		if (error_code & 0x08000000)
-			goto bad_area;
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
 			goto bad_area;
 	}
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index c90e602677c9..83dfcb55ffef 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -172,9 +172,14 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
 void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 		pte_t pte)
 {
-#ifdef CONFIG_DEBUG_VM
-	WARN_ON(pte_val(*ptep) & _PAGE_PRESENT);
-#endif
+	/*
+	 * When handling numa faults, we already have the pte marked
+	 * _PAGE_PRESENT, but we can be sure that it is not in hpte.
+	 * Hence we can use set_pte_at for them.
+	 */
+	VM_WARN_ON((pte_val(*ptep) & (_PAGE_PRESENT | _PAGE_USER)) ==
+		(_PAGE_PRESENT | _PAGE_USER));
+
 	/* Note: mm->context.id might not yet have been assigned as
 	 * this context might not have been activated yet when this
 	 * is called.
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 4fe5f64cc179..91bb8836825a 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -718,7 +718,8 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		pmd_t *pmdp, pmd_t pmd)
 {
 #ifdef CONFIG_DEBUG_VM
-	WARN_ON(pmd_val(*pmdp) & _PAGE_PRESENT);
+	WARN_ON((pmd_val(*pmdp) & (_PAGE_PRESENT | _PAGE_USER)) ==
+		(_PAGE_PRESENT | _PAGE_USER));
 	assert_spin_locked(&mm->page_table_lock);
 	WARN_ON(!pmd_trans_huge(pmd));
 #endif