author	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>	2014-08-13 03:02:00 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2014-08-13 04:20:39 -0400
commit	fc0479557572375100ef16c71170b29a98e0d69a (patch)
tree	89c0b9da580f2b47bcc8663deb02323052f5025b /arch/powerpc
parent	629149fae478f0ac6bf705a535708b192e9c6b59 (diff)
powerpc/thp: Handle combo pages in invalidate
If we change the base page size of a segment, either via sub_page_protect or
via remap_4k_pfn, we do a demote_segment, which doesn't flush the hash table
entries. We instead do a lazy hash page table flush for all mapped pages in
the demoted segment; this happens when we handle the hash page fault for
these pages.

We use the _PAGE_COMBO bit along with _PAGE_HASHPTE to indicate whether a
pte is backed by 4K hash ptes. If we find _PAGE_COMBO not set on the pte,
we could still have stale 64K hash pte entries in the hash page table, and
we need to invalidate those entries. Use _PAGE_COMBO to determine the page
size with which we should invalidate the hash table entries on unmap.

CC: <stable@vger.kernel.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
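In short, the fix makes the flush path take the invalidation page size from
the saved old pmd value rather than from the slice map. Below is a minimal
standalone sketch of that selection logic; the flag and MMU_PAGE_* values
here are illustrative placeholders, not the real powerpc definitions, and
the actual kernel change is in the pgtable_64.c hunk further down:

    #include <stdio.h>

    /*
     * Placeholder values for illustration only; the real definitions
     * live in the powerpc MMU headers and differ from these.
     */
    #define _PAGE_COMBO   0x10000000UL  /* hugepage backed by 4K hash ptes */
    #define MMU_PAGE_4K   0
    #define MMU_PAGE_64K  2

    /*
     * Mirrors the selection the patch adds to hpte_do_hugepage_flush():
     * the old pmd value, captured before the pmd is updated, records
     * which base page size the stale hash ptes were inserted with.
     */
    static int invalidate_psize(unsigned long old_pmd)
    {
            return (old_pmd & _PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K;
    }

    int main(void)
    {
            printf("combo pmd  -> invalidate with psize %d (4K)\n",
                   invalidate_psize(_PAGE_COMBO));
            printf("normal pmd -> invalidate with psize %d (64K)\n",
                   invalidate_psize(0));
            return 0;
    }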
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/pgtable-ppc64.h	 2 +-
-rw-r--r--	arch/powerpc/mm/pgtable_64.c	14 +++++++++++---
-rw-r--r--	arch/powerpc/mm/tlb_hash64.c	 2 +-
3 files changed, 13 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index eb9261024f51..7b3d54fae46f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -413,7 +413,7 @@ static inline char *get_hpte_slot_array(pmd_t *pmdp)
 }
 
 extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
-                                   pmd_t *pmdp);
+                                   pmd_t *pmdp, unsigned long old_pmd);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot);
 extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 5039f3b04d6e..948a81e02ddb 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -538,7 +538,7 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
         *pmdp = __pmd((old & ~clr) | set);
 #endif
         if (old & _PAGE_HASHPTE)
-                hpte_do_hugepage_flush(mm, addr, pmdp);
+                hpte_do_hugepage_flush(mm, addr, pmdp, old);
         return old;
 }
 
@@ -645,7 +645,7 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
         if (!(old & _PAGE_SPLITTING)) {
                 /* We need to flush the hpte */
                 if (old & _PAGE_HASHPTE)
-                        hpte_do_hugepage_flush(vma->vm_mm, address, pmdp);
+                        hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old);
         }
         /*
          * This ensures that generic code that rely on IRQ disabling
@@ -723,7 +723,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
  * neesd to be flushed.
  */
 void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
-                            pmd_t *pmdp)
+                            pmd_t *pmdp, unsigned long old_pmd)
 {
         int ssize, i;
         unsigned long s_addr;
@@ -746,7 +746,15 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
                 return;
 
         /* get the base page size,vsid and segment size */
+#ifdef CONFIG_DEBUG_VM
         psize = get_slice_psize(mm, s_addr);
+        BUG_ON(psize == MMU_PAGE_16M);
+#endif
+        if (old_pmd & _PAGE_COMBO)
+                psize = MMU_PAGE_4K;
+        else
+                psize = MMU_PAGE_64K;
+
         if (!is_kernel_addr(s_addr)) {
                 ssize = user_segment_size(s_addr);
                 vsid = get_vsid(mm->context.id, s_addr, ssize);
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index c99f6510a0b2..9adda5790463 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -216,7 +216,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
                 if (!(pte & _PAGE_HASHPTE))
                         continue;
                 if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
-                        hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
+                        hpte_do_hugepage_flush(mm, start, (pmd_t *)ptep, pte);
                 else
                         hpte_need_flush(mm, start, ptep, pte, 0);
         }