Diffstat (limited to 'arch/powerpc/mm/tlb_hash64.c')
-rw-r--r--	arch/powerpc/mm/tlb_hash64.c	40
1 file changed, 36 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 023ec8a13f38..36e44b4260eb 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -183,12 +183,13 @@ void tlb_flush(struct mmu_gather *tlb)
  * since 64K pages may overlap with other bridges when using 64K pages
  * with 4K HW pages on IO space.
  *
- * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
- * and is implemented for small size rather than speed.
+ * Because of that usage pattern, it is implemented for small size rather
+ * than speed.
  */
 void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 			      unsigned long end)
 {
+	int hugepage_shift;
 	unsigned long flags;
 
 	start = _ALIGN_DOWN(start, PAGE_SIZE);
@@ -206,7 +207,8 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 	local_irq_save(flags);
 	arch_enter_lazy_mmu_mode();
 	for (; start < end; start += PAGE_SIZE) {
-		pte_t *ptep = find_linux_pte(mm->pgd, start);
+		pte_t *ptep = find_linux_pte_or_hugepte(mm->pgd, start,
+							&hugepage_shift);
 		unsigned long pte;
 
 		if (ptep == NULL)
@@ -214,7 +216,37 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
 		pte = pte_val(*ptep);
 		if (!(pte & _PAGE_HASHPTE))
 			continue;
-		hpte_need_flush(mm, start, ptep, pte, 0);
+		if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
+			hpte_do_hugepage_flush(mm, start, (pmd_t *)pte);
+		else
+			hpte_need_flush(mm, start, ptep, pte, 0);
+	}
+	arch_leave_lazy_mmu_mode();
+	local_irq_restore(flags);
+}
+
+void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
+{
+	pte_t *pte;
+	pte_t *start_pte;
+	unsigned long flags;
+
+	addr = _ALIGN_DOWN(addr, PMD_SIZE);
+	/* Note: Normally, we should only ever use a batch within a
+	 * PTE locked section. This violates the rule, but will work
+	 * since we don't actually modify the PTEs, we just flush the
+	 * hash while leaving the PTEs intact (including their reference
+	 * to being hashed). This is not the most performance oriented
+	 * way to do things but is fine for our needs here.
+	 */
+	local_irq_save(flags);
+	arch_enter_lazy_mmu_mode();
+	start_pte = pte_offset_map(pmd, addr);
+	for (pte = start_pte; pte < start_pte + PTRS_PER_PTE; pte++) {
+		unsigned long pteval = pte_val(*pte);
+		if (pteval & _PAGE_HASHPTE)
+			hpte_need_flush(mm, addr, pte, pteval, 0);
+		addr += PAGE_SIZE;
 	}
 	arch_leave_lazy_mmu_mode();
 	local_irq_restore(flags);
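
As a usage illustration, here is a minimal, hypothetical caller of the new
flush_tlb_pmd_range() helper, assuming a THP-collapse-style path that is
about to replace the 4K PTEs under a PMD with a single huge mapping.
collapse_huge_pmd() is invented for this sketch; only flush_tlb_pmd_range(),
struct mm_struct and pmd_t come from the patch above.

#include <linux/mm.h>
#include <asm/tlbflush.h>

/*
 * Illustrative sketch, not part of the patch: before withdrawing the
 * page table under @pmd, flush every hash entry the 4K PTEs may have
 * created. flush_tlb_pmd_range() aligns @addr down to PMD_SIZE itself
 * and walks all PTRS_PER_PTE entries in one IRQ-disabled, lazy-MMU
 * batch, flushing the hash while leaving the Linux PTEs untouched.
 */
static void collapse_huge_pmd(struct mm_struct *mm, pmd_t *pmd,
			      unsigned long addr)
{
	flush_tlb_pmd_range(mm, pmd, addr);
	/* ...withdraw the page table and install the huge PMD here... */
}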