diff options
Diffstat (limited to 'arch/powerpc/mm/pgtable_64.c')
-rw-r--r-- | arch/powerpc/mm/pgtable_64.c | 46 |
1 file changed, 30 insertions, 16 deletions
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index f6ce1f111f5b..c8d709ab489d 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c | |||
@@ -54,6 +54,9 @@ | |||
54 | 54 | ||
55 | #include "mmu_decl.h" | 55 | #include "mmu_decl.h" |
56 | 56 | ||
57 | #define CREATE_TRACE_POINTS | ||
58 | #include <trace/events/thp.h> | ||
59 | |||
57 | /* Some sanity checking */ | 60 | /* Some sanity checking */ |
58 | #if TASK_SIZE_USER64 > PGTABLE_RANGE | 61 | #if TASK_SIZE_USER64 > PGTABLE_RANGE |
59 | #error TASK_SIZE_USER64 exceeds pagetable range | 62 | #error TASK_SIZE_USER64 exceeds pagetable range |
@@ -68,7 +71,7 @@ | |||
68 | unsigned long ioremap_bot = IOREMAP_BASE; | 71 | unsigned long ioremap_bot = IOREMAP_BASE; |
69 | 72 | ||
70 | #ifdef CONFIG_PPC_MMU_NOHASH | 73 | #ifdef CONFIG_PPC_MMU_NOHASH |
71 | static void *early_alloc_pgtable(unsigned long size) | 74 | static __ref void *early_alloc_pgtable(unsigned long size) |
72 | { | 75 | { |
73 | void *pt; | 76 | void *pt; |
74 | 77 | ||
@@ -537,8 +540,9 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, | |||
537 | old = pmd_val(*pmdp); | 540 | old = pmd_val(*pmdp); |
538 | *pmdp = __pmd((old & ~clr) | set); | 541 | *pmdp = __pmd((old & ~clr) | set); |
539 | #endif | 542 | #endif |
543 | trace_hugepage_update(addr, old, clr, set); | ||
540 | if (old & _PAGE_HASHPTE) | 544 | if (old & _PAGE_HASHPTE) |
541 | hpte_do_hugepage_flush(mm, addr, pmdp); | 545 | hpte_do_hugepage_flush(mm, addr, pmdp, old); |
542 | return old; | 546 | return old; |
543 | } | 547 | } |
544 | 548 | ||
@@ -642,10 +646,11 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, | |||
642 | * If we didn't have the splitting flag set, go and flush the | 646 | * If we didn't have the splitting flag set, go and flush the |
643 | * HPTE entries. | 647 | * HPTE entries. |
644 | */ | 648 | */ |
649 | trace_hugepage_splitting(address, old); | ||
645 | if (!(old & _PAGE_SPLITTING)) { | 650 | if (!(old & _PAGE_SPLITTING)) { |
646 | /* We need to flush the hpte */ | 651 | /* We need to flush the hpte */ |
647 | if (old & _PAGE_HASHPTE) | 652 | if (old & _PAGE_HASHPTE) |
648 | hpte_do_hugepage_flush(vma->vm_mm, address, pmdp); | 653 | hpte_do_hugepage_flush(vma->vm_mm, address, pmdp, old); |
649 | } | 654 | } |
650 | /* | 655 | /* |
651 | * This ensures that generic code that rely on IRQ disabling | 656 | * This ensures that generic code that rely on IRQ disabling |
@@ -709,6 +714,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr, | |||
709 | assert_spin_locked(&mm->page_table_lock); | 714 | assert_spin_locked(&mm->page_table_lock); |
710 | WARN_ON(!pmd_trans_huge(pmd)); | 715 | WARN_ON(!pmd_trans_huge(pmd)); |
711 | #endif | 716 | #endif |
717 | trace_hugepage_set_pmd(addr, pmd); | ||
712 | return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); | 718 | return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd)); |
713 | } | 719 | } |
714 | 720 | ||
@@ -723,7 +729,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, | |||
723 | * needs to be flushed. | 729 | * needs to be flushed. |
724 | */ | 730 | */ |
725 | void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, | 731 | void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, |
726 | pmd_t *pmdp) | 732 | pmd_t *pmdp, unsigned long old_pmd) |
727 | { | 733 | { |
728 | int ssize, i; | 734 | int ssize, i; |
729 | unsigned long s_addr; | 735 | unsigned long s_addr; |
@@ -745,12 +751,29 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, | |||
745 | if (!hpte_slot_array) | 751 | if (!hpte_slot_array) |
746 | return; | 752 | return; |
747 | 753 | ||
748 | /* get the base page size */ | 754 | /* get the base page size, vsid and segment size */ |
755 | #ifdef CONFIG_DEBUG_VM | ||
749 | psize = get_slice_psize(mm, s_addr); | 756 | psize = get_slice_psize(mm, s_addr); |
757 | BUG_ON(psize == MMU_PAGE_16M); | ||
758 | #endif | ||
759 | if (old_pmd & _PAGE_COMBO) | ||
760 | psize = MMU_PAGE_4K; | ||
761 | else | ||
762 | psize = MMU_PAGE_64K; | ||
763 | |||
764 | if (!is_kernel_addr(s_addr)) { | ||
765 | ssize = user_segment_size(s_addr); | ||
766 | vsid = get_vsid(mm->context.id, s_addr, ssize); | ||
767 | WARN_ON(vsid == 0); | ||
768 | } else { | ||
769 | vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize); | ||
770 | ssize = mmu_kernel_ssize; | ||
771 | } | ||
750 | 772 | ||
751 | if (ppc_md.hugepage_invalidate) | 773 | if (ppc_md.hugepage_invalidate) |
752 | return ppc_md.hugepage_invalidate(mm, hpte_slot_array, | 774 | return ppc_md.hugepage_invalidate(vsid, s_addr, |
753 | s_addr, psize); | 775 | hpte_slot_array, |
776 | psize, ssize); | ||
754 | /* | 777 | /* |
755 | * No bulk hpte removal support, invalidate each entry | 778 | * No bulk hpte removal support, invalidate each entry |
756 | */ | 779 | */ |
@@ -768,15 +791,6 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, | |||
768 | 791 | ||
769 | /* get the vpn */ | 792 | /* get the vpn */ |
770 | addr = s_addr + (i * (1ul << shift)); | 793 | addr = s_addr + (i * (1ul << shift)); |
771 | if (!is_kernel_addr(addr)) { | ||
772 | ssize = user_segment_size(addr); | ||
773 | vsid = get_vsid(mm->context.id, addr, ssize); | ||
774 | WARN_ON(vsid == 0); | ||
775 | } else { | ||
776 | vsid = get_kernel_vsid(addr, mmu_kernel_ssize); | ||
777 | ssize = mmu_kernel_ssize; | ||
778 | } | ||
779 | |||
780 | vpn = hpt_vpn(addr, vsid, ssize); | 794 | vpn = hpt_vpn(addr, vsid, ssize); |
781 | hash = hpt_hash(vpn, shift, ssize); | 795 | hash = hpt_hash(vpn, shift, ssize); |
782 | if (hidx & _PTEIDX_SECONDARY) | 796 | if (hidx & _PTEIDX_SECONDARY) |