author     Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>    2013-06-20 05:00:18 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>     2013-06-21 02:01:54 -0400
commit     12bc9f6fc1d6582b4529ac522d2231bd2584a5f1 (patch)
tree       40be8749c9b19e0e9dfddeee54436271dce2bb2f /arch/powerpc/mm
parent     ac52ae4721233150a3c30e9732a1c1f4f68e7db7 (diff)
powerpc: Replace find_linux_pte with find_linux_pte_or_hugepte
Replace find_linux_pte with find_linux_pte_or_hugepte and explicitly
document why we don't need to handle transparent hugepages at callsites.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c |  8
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c   |  8
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c    |  9
3 files changed, 20 insertions, 5 deletions
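
Before the per-file hunks, here is a minimal call-site sketch (illustrative only; the helper name below is hypothetical, not code from this commit) of the pattern the patch standardises on: walk the page tables with find_linux_pte_or_hugepte() and branch on the shift it reports, which is non-zero when the mapping is a huge page.

/*
 * Illustrative sketch, not part of the patch: the shape of a call site
 * after this change.  find_linux_pte_or_hugepte() returns the (huge) PTE
 * pointer and reports the huge-page shift through its third argument;
 * a shift of zero means an ordinary 4K/64K PTE.
 */
static pte_t *example_lookup(struct mm_struct *mm, unsigned long ea)
{
        int hugepage_shift;
        pte_t *ptep;

        ptep = find_linux_pte_or_hugepte(mm->pgd, ea, &hugepage_shift);
        if (!ptep)
                return NULL;            /* nothing mapped at ea */

        if (hugepage_shift) {
                /*
                 * Huge mapping: hugetlbfs or THP.  Call sites that can see
                 * THP must either handle it or document why it cannot
                 * occur, which is what this patch adds at each caller.
                 */
        }
        return ptep;
}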
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2f470809876f..e8434ca6efd4 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1145,6 +1145,7 @@ EXPORT_SYMBOL_GPL(hash_page);
 void hash_preload(struct mm_struct *mm, unsigned long ea,
                   unsigned long access, unsigned long trap)
 {
+        int hugepage_shift;
         unsigned long vsid;
         pgd_t *pgdir;
         pte_t *ptep;
@@ -1166,10 +1167,15 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
         pgdir = mm->pgd;
         if (pgdir == NULL)
                 return;
-        ptep = find_linux_pte(pgdir, ea);
+        /*
+         * THP pages use update_mmu_cache_pmd. We don't do
+         * hash preload there. Hence can ignore THP here
+         */
+        ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift);
         if (!ptep)
                 return;
 
+        WARN_ON(hugepage_shift);
 #ifdef CONFIG_PPC_64K_PAGES
         /* If either _PAGE_4K_PFN or _PAGE_NO_CACHE is set (and we are on
          * a 64K kernel), then we don't preload, hash_page() will take
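
The WARN_ON(hugepage_shift) added above only reports the unexpected case; hash_preload() then carries on as if a normal PTE had been found. Since WARN_ON() evaluates to its condition, a caller that preferred to bail out could fold the diagnostic into the check. A variant sketch, not what this commit does:

        /* Variant sketch (not this commit's behaviour): warn and skip the
         * preload entirely if a huge mapping unexpectedly reaches this path;
         * THP pages are handled via update_mmu_cache_pmd() instead. */
        ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugepage_shift);
        if (!ptep)
                return;
        if (WARN_ON(hugepage_shift))
                return;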
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 49282045ee96..8add58061003 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -105,6 +105,7 @@ int pgd_huge(pgd_t pgd)
 
 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
+        /* Only called for hugetlbfs pages, hence can ignore THP */
         return find_linux_pte_or_hugepte(mm->pgd, addr, NULL);
 }
 
@@ -673,11 +674,14 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
         struct page *page;
         unsigned shift;
         unsigned long mask;
-
+        /*
+         * Transparent hugepages are handled by generic code. We can skip them
+         * here.
+         */
         ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
 
         /* Verify it is a huge page else bail. */
-        if (!ptep || !shift)
+        if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep))
                 return ERR_PTR(-EINVAL);
 
         mask = (1UL << shift) - 1;
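
The shift returned by find_linux_pte_or_hugepte() also drives the arithmetic that follows this hunk: the low shift bits of the faulting address are its byte offset inside the huge page. A small illustrative helper (the function name is hypothetical):

/* Hypothetical helper mirroring the mask arithmetic in follow_huge_addr():
 * for a 16MB huge page, shift is 24, mask is 0xffffff, and the result is
 * the byte offset of 'address' within that huge page. */
static unsigned long huge_page_offset(unsigned long address, unsigned shift)
{
        unsigned long mask = (1UL << shift) - 1;

        return address & mask;
}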
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 48bf63ea6525..313c85c5aa90 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -189,6 +189,7 @@ void tlb_flush(struct mmu_gather *tlb)
 void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
                               unsigned long end)
 {
+        int hugepage_shift;
         unsigned long flags;
 
         start = _ALIGN_DOWN(start, PAGE_SIZE);
@@ -206,7 +207,8 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
         local_irq_save(flags);
         arch_enter_lazy_mmu_mode();
         for (; start < end; start += PAGE_SIZE) {
-                pte_t *ptep = find_linux_pte(mm->pgd, start);
+                pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start,
+                                                        &hugepage_shift);
                 unsigned long pte;
 
                 if (ptep == NULL)
@@ -214,7 +216,10 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
                 pte = pte_val(*ptep);
                 if (!(pte & _PAGE_HASHPTE))
                         continue;
-                hpte_need_flush(mm, start, ptep, pte, 0);
+                if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
+                        hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
+                else
+                        hpte_need_flush(mm, start, ptep, pte, 0);
         }
         arch_leave_lazy_mmu_mode();
         local_irq_restore(flags);
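
A transparent huge page is recognised here by combining the two signals the walker provides: a non-zero huge-page shift plus pmd_trans_huge() on the entry it returned. Condensed into an illustrative predicate (hypothetical helper name; the real code performs the test inline):

/* Illustrative predicate, not from the tree: an entry found by
 * find_linux_pte_or_hugepte() is a THP entry when the walk reported a
 * huge-page shift and the PMD is marked as a transparent huge page. */
static bool entry_is_thp(pte_t *ptep, int hugepage_shift)
{
        return hugepage_shift && pmd_trans_huge(*(pmd_t *)ptep);
}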