aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorToshi Kani <toshi.kani@hpe.com>2018-06-27 10:13:48 -0400
committerThomas Gleixner <tglx@linutronix.de>2018-07-04 15:37:09 -0400
commit5e0fb5df2ee871b841f96f9cb6a7f2784e96aa4e (patch)
tree4f8d20837be1bbd67ba2db4561e1bf0fcb4da495
parent785a19f9d1dd8a4ab2d0633be4656653bd3de1fc (diff)
x86/mm: Add TLB purge to free pmd/pte page interfaces
ioremap() calls pud_free_pmd_page() / pmd_free_pte_page() when it creates a pud / pmd map. The following preconditions are met at their entry. - All pte entries for a target pud/pmd address range have been cleared. - System-wide TLB purges have been performed for a target pud/pmd address range. The preconditions assure that there is no stale TLB entry for the range. Speculation may not cache TLB entries since it requires all levels of page entries, including ptes, to have P & A-bits set for an associated address. However, speculation may cache pud/pmd entries (paging-structure caches) when they have P-bit set. Add a system-wide TLB purge (INVLPG) to a single page after clearing pud/pmd entry's P-bit. SDM 4.10.4.1, Operations that Invalidate TLBs and Paging-Structure Caches, states that: INVLPG invalidates all paging-structure caches associated with the current PCID regardless of the linear addresses to which they correspond. Fixes: 28ee90fe6048 ("x86/mm: implement free pmd/pte page interfaces") Signed-off-by: Toshi Kani <toshi.kani@hpe.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: mhocko@suse.com Cc: akpm@linux-foundation.org Cc: hpa@zytor.com Cc: cpandya@codeaurora.org Cc: linux-mm@kvack.org Cc: linux-arm-kernel@lists.infradead.org Cc: Joerg Roedel <joro@8bytes.org> Cc: stable@vger.kernel.org Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Michal Hocko <mhocko@suse.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: <stable@vger.kernel.org> Link: https://lkml.kernel.org/r/20180627141348.21777-4-toshi.kani@hpe.com
-rw-r--r--arch/x86/mm/pgtable.c36
1 files changed, 30 insertions, 6 deletions
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index fbd14e506758..e3deefb891da 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -725,24 +725,44 @@ int pmd_clear_huge(pmd_t *pmd)
725 * @pud: Pointer to a PUD. 725 * @pud: Pointer to a PUD.
726 * @addr: Virtual address associated with pud. 726 * @addr: Virtual address associated with pud.
727 * 727 *
728 * Context: The pud range has been unmaped and TLB purged. 728 * Context: The pud range has been unmapped and TLB purged.
729 * Return: 1 if clearing the entry succeeded. 0 otherwise. 729 * Return: 1 if clearing the entry succeeded. 0 otherwise.
730 *
731 * NOTE: Callers must allow a single page allocation.
730 */ 732 */
731int pud_free_pmd_page(pud_t *pud, unsigned long addr) 733int pud_free_pmd_page(pud_t *pud, unsigned long addr)
732{ 734{
733 pmd_t *pmd; 735 pmd_t *pmd, *pmd_sv;
736 pte_t *pte;
734 int i; 737 int i;
735 738
736 if (pud_none(*pud)) 739 if (pud_none(*pud))
737 return 1; 740 return 1;
738 741
739 pmd = (pmd_t *)pud_page_vaddr(*pud); 742 pmd = (pmd_t *)pud_page_vaddr(*pud);
743 pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
744 if (!pmd_sv)
745 return 0;
740 746
741 for (i = 0; i < PTRS_PER_PMD; i++) 747 for (i = 0; i < PTRS_PER_PMD; i++) {
742 if (!pmd_free_pte_page(&pmd[i], addr + (i * PMD_SIZE))) 748 pmd_sv[i] = pmd[i];
743 return 0; 749 if (!pmd_none(pmd[i]))
750 pmd_clear(&pmd[i]);
751 }
744 752
745 pud_clear(pud); 753 pud_clear(pud);
754
755 /* INVLPG to clear all paging-structure caches */
756 flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
757
758 for (i = 0; i < PTRS_PER_PMD; i++) {
759 if (!pmd_none(pmd_sv[i])) {
760 pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
761 free_page((unsigned long)pte);
762 }
763 }
764
765 free_page((unsigned long)pmd_sv);
746 free_page((unsigned long)pmd); 766 free_page((unsigned long)pmd);
747 767
748 return 1; 768 return 1;
@@ -753,7 +773,7 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr)
753 * @pmd: Pointer to a PMD. 773 * @pmd: Pointer to a PMD.
754 * @addr: Virtual address associated with pmd. 774 * @addr: Virtual address associated with pmd.
755 * 775 *
756 * Context: The pmd range has been unmaped and TLB purged. 776 * Context: The pmd range has been unmapped and TLB purged.
757 * Return: 1 if clearing the entry succeeded. 0 otherwise. 777 * Return: 1 if clearing the entry succeeded. 0 otherwise.
758 */ 778 */
759int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) 779int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
@@ -765,6 +785,10 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
765 785
766 pte = (pte_t *)pmd_page_vaddr(*pmd); 786 pte = (pte_t *)pmd_page_vaddr(*pmd);
767 pmd_clear(pmd); 787 pmd_clear(pmd);
788
789 /* INVLPG to clear all paging-structure caches */
790 flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
791
768 free_page((unsigned long)pte); 792 free_page((unsigned long)pte);
769 793
770 return 1; 794 return 1;