Diffstat (limited to 'arch/x86/mm/pgtable_32.c')
-rw-r--r--  arch/x86/mm/pgtable_32.c  23
1 file changed, 23 insertions, 0 deletions
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 2ae5999a795a..cb3aa470249b 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -376,3 +376,26 @@ void check_pgt_cache(void)
 {
 	quicklist_trim(0, pgd_dtor, 25, 16);
 }
+
+void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
+{
+	paravirt_release_pt(page_to_pfn(pte));
+	tlb_remove_page(tlb, pte);
+}
+
+#ifdef CONFIG_X86_PAE
+
+void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
+{
+	/* This is called just after the pmd has been detached from
+	   the pgd, which requires a full tlb flush to be recognized
+	   by the CPU.  Rather than incurring multiple tlb flushes
+	   while the address space is being pulled down, make the tlb
+	   gathering machinery do a full flush when we're done. */
+	tlb->fullmm = 1;
+
+	paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
+	tlb_remove_page(tlb, virt_to_page(pmd));
+}
+
+#endif
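
The comment in __pmd_free_tlb() above captures the key idea: when a pmd page is detached during address-space teardown, the patch sets tlb->fullmm so the mmu_gather machinery performs one full TLB flush at the end instead of many per-page flushes. The sketch below is a simplified, userspace-only model of that batching idea, not kernel code; every name in it (fake_mmu_gather, gather_page, tlb_finish, the *_stub flush functions) is an illustrative invention and does not correspond to a real kernel API.

/*
 * Simplified model of "batch now, flush once at the end".
 * Freeing a pmd-level page upgrades the eventual flush to a
 * full one, mirroring what tlb->fullmm = 1 requests above.
 */
#include <stdio.h>
#include <stdbool.h>

struct fake_mmu_gather {
	bool fullmm;          /* flush the whole address space when done */
	int  pending_pages;   /* page-table pages queued for freeing */
};

static void flush_tlb_all_stub(void)
{
	puts("one full TLB flush");
}

static void flush_tlb_entries_stub(int n)
{
	printf("flush %d individual entries\n", n);
}

/* Queue a page-table page; tearing down a pmd page forces a full flush. */
static void gather_page(struct fake_mmu_gather *tlb, bool is_pmd_page)
{
	if (is_pmd_page)
		tlb->fullmm = true;
	tlb->pending_pages++;
}

/* Drain the batch with a single flush decision. */
static void tlb_finish(struct fake_mmu_gather *tlb)
{
	if (tlb->fullmm)
		flush_tlb_all_stub();
	else
		flush_tlb_entries_stub(tlb->pending_pages);
	tlb->pending_pages = 0;
	tlb->fullmm = false;
}

int main(void)
{
	struct fake_mmu_gather tlb = { 0 };

	gather_page(&tlb, false);
	gather_page(&tlb, false);
	gather_page(&tlb, true);  /* pmd page: the single full flush covers everything */
	tlb_finish(&tlb);
	return 0;
}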