aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChintan Pandya <cpandya@codeaurora.org>2018-06-27 10:13:47 -0400
committerThomas Gleixner <tglx@linutronix.de>2018-07-04 15:37:08 -0400
commit785a19f9d1dd8a4ab2d0633be4656653bd3de1fc (patch)
tree972fe9d837f28bdafa492a09f2fafd0f54b1cde0
parentf967db0b9ed44ec3057a28f3b28efc51df51b835 (diff)
ioremap: Update pgtable free interfaces with addr
The following kernel panic was observed on ARM64 platform due to a stale TLB entry. 1. ioremap with 4K size, a valid pte page table is set. 2. iounmap it, its pte entry is set to 0. 3. ioremap the same address with 2M size, update its pmd entry with a new value. 4. CPU may hit an exception because the old pmd entry is still in TLB, which leads to a kernel panic. Commit b6bdb7517c3d ("mm/vmalloc: add interfaces to free unmapped page table") has addressed this panic by falling back to pte mappings in the above case on ARM64. To support pmd mappings in all cases, TLB purge needs to be performed in this case on ARM64. Add a new arg, 'addr', to pud_free_pmd_page() and pmd_free_pte_page() so that TLB purge can be added later in separate patches. [toshi.kani@hpe.com: merge changes, rewrite patch description] Fixes: 28ee90fe6048 ("x86/mm: implement free pmd/pte page interfaces") Signed-off-by: Chintan Pandya <cpandya@codeaurora.org> Signed-off-by: Toshi Kani <toshi.kani@hpe.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: mhocko@suse.com Cc: akpm@linux-foundation.org Cc: hpa@zytor.com Cc: linux-mm@kvack.org Cc: linux-arm-kernel@lists.infradead.org Cc: Will Deacon <will.deacon@arm.com> Cc: Joerg Roedel <joro@8bytes.org> Cc: stable@vger.kernel.org Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Michal Hocko <mhocko@suse.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: <stable@vger.kernel.org> Link: https://lkml.kernel.org/r/20180627141348.21777-3-toshi.kani@hpe.com
-rw-r--r--arch/arm64/mm/mmu.c4
-rw-r--r--arch/x86/mm/pgtable.c12
-rw-r--r--include/asm-generic/pgtable.h8
-rw-r--r--lib/ioremap.c4
4 files changed, 15 insertions, 13 deletions
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 493ff75670ff..8ae5d7ae4af3 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -977,12 +977,12 @@ int pmd_clear_huge(pmd_t *pmdp)
977 return 1; 977 return 1;
978} 978}
979 979
980int pud_free_pmd_page(pud_t *pud) 980int pud_free_pmd_page(pud_t *pud, unsigned long addr)
981{ 981{
982 return pud_none(*pud); 982 return pud_none(*pud);
983} 983}
984 984
985int pmd_free_pte_page(pmd_t *pmd) 985int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
986{ 986{
987 return pmd_none(*pmd); 987 return pmd_none(*pmd);
988} 988}
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 1aeb7a5dbce5..fbd14e506758 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -723,11 +723,12 @@ int pmd_clear_huge(pmd_t *pmd)
723/** 723/**
724 * pud_free_pmd_page - Clear pud entry and free pmd page. 724 * pud_free_pmd_page - Clear pud entry and free pmd page.
725 * @pud: Pointer to a PUD. 725 * @pud: Pointer to a PUD.
726 * @addr: Virtual address associated with pud.
726 * 727 *
727 * Context: The pud range has been unmaped and TLB purged. 728 * Context: The pud range has been unmaped and TLB purged.
728 * Return: 1 if clearing the entry succeeded. 0 otherwise. 729 * Return: 1 if clearing the entry succeeded. 0 otherwise.
729 */ 730 */
730int pud_free_pmd_page(pud_t *pud) 731int pud_free_pmd_page(pud_t *pud, unsigned long addr)
731{ 732{
732 pmd_t *pmd; 733 pmd_t *pmd;
733 int i; 734 int i;
@@ -738,7 +739,7 @@ int pud_free_pmd_page(pud_t *pud)
738 pmd = (pmd_t *)pud_page_vaddr(*pud); 739 pmd = (pmd_t *)pud_page_vaddr(*pud);
739 740
740 for (i = 0; i < PTRS_PER_PMD; i++) 741 for (i = 0; i < PTRS_PER_PMD; i++)
741 if (!pmd_free_pte_page(&pmd[i])) 742 if (!pmd_free_pte_page(&pmd[i], addr + (i * PMD_SIZE)))
742 return 0; 743 return 0;
743 744
744 pud_clear(pud); 745 pud_clear(pud);
@@ -750,11 +751,12 @@ int pud_free_pmd_page(pud_t *pud)
750/** 751/**
751 * pmd_free_pte_page - Clear pmd entry and free pte page. 752 * pmd_free_pte_page - Clear pmd entry and free pte page.
752 * @pmd: Pointer to a PMD. 753 * @pmd: Pointer to a PMD.
754 * @addr: Virtual address associated with pmd.
753 * 755 *
754 * Context: The pmd range has been unmaped and TLB purged. 756 * Context: The pmd range has been unmaped and TLB purged.
755 * Return: 1 if clearing the entry succeeded. 0 otherwise. 757 * Return: 1 if clearing the entry succeeded. 0 otherwise.
756 */ 758 */
757int pmd_free_pte_page(pmd_t *pmd) 759int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
758{ 760{
759 pte_t *pte; 761 pte_t *pte;
760 762
@@ -770,7 +772,7 @@ int pmd_free_pte_page(pmd_t *pmd)
770 772
771#else /* !CONFIG_X86_64 */ 773#else /* !CONFIG_X86_64 */
772 774
773int pud_free_pmd_page(pud_t *pud) 775int pud_free_pmd_page(pud_t *pud, unsigned long addr)
774{ 776{
775 return pud_none(*pud); 777 return pud_none(*pud);
776} 778}
@@ -779,7 +781,7 @@ int pud_free_pmd_page(pud_t *pud)
779 * Disable free page handling on x86-PAE. This assures that ioremap() 781 * Disable free page handling on x86-PAE. This assures that ioremap()
780 * does not update sync'd pmd entries. See vmalloc_sync_one(). 782 * does not update sync'd pmd entries. See vmalloc_sync_one().
781 */ 783 */
782int pmd_free_pte_page(pmd_t *pmd) 784int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
783{ 785{
784 return pmd_none(*pmd); 786 return pmd_none(*pmd);
785} 787}
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f59639afaa39..b081794ba135 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1019,8 +1019,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
1019int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot); 1019int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
1020int pud_clear_huge(pud_t *pud); 1020int pud_clear_huge(pud_t *pud);
1021int pmd_clear_huge(pmd_t *pmd); 1021int pmd_clear_huge(pmd_t *pmd);
1022int pud_free_pmd_page(pud_t *pud); 1022int pud_free_pmd_page(pud_t *pud, unsigned long addr);
1023int pmd_free_pte_page(pmd_t *pmd); 1023int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
1024#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */ 1024#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
1025static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot) 1025static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
1026{ 1026{
@@ -1046,11 +1046,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
1046{ 1046{
1047 return 0; 1047 return 0;
1048} 1048}
1049static inline int pud_free_pmd_page(pud_t *pud) 1049static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1050{ 1050{
1051 return 0; 1051 return 0;
1052} 1052}
1053static inline int pmd_free_pte_page(pmd_t *pmd) 1053static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1054{ 1054{
1055 return 0; 1055 return 0;
1056} 1056}
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 54e5bbaa3200..517f5853ffed 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -92,7 +92,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
92 if (ioremap_pmd_enabled() && 92 if (ioremap_pmd_enabled() &&
93 ((next - addr) == PMD_SIZE) && 93 ((next - addr) == PMD_SIZE) &&
94 IS_ALIGNED(phys_addr + addr, PMD_SIZE) && 94 IS_ALIGNED(phys_addr + addr, PMD_SIZE) &&
95 pmd_free_pte_page(pmd)) { 95 pmd_free_pte_page(pmd, addr)) {
96 if (pmd_set_huge(pmd, phys_addr + addr, prot)) 96 if (pmd_set_huge(pmd, phys_addr + addr, prot))
97 continue; 97 continue;
98 } 98 }
@@ -119,7 +119,7 @@ static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr,
119 if (ioremap_pud_enabled() && 119 if (ioremap_pud_enabled() &&
120 ((next - addr) == PUD_SIZE) && 120 ((next - addr) == PUD_SIZE) &&
121 IS_ALIGNED(phys_addr + addr, PUD_SIZE) && 121 IS_ALIGNED(phys_addr + addr, PUD_SIZE) &&
122 pud_free_pmd_page(pud)) { 122 pud_free_pmd_page(pud, addr)) {
123 if (pud_set_huge(pud, phys_addr + addr, prot)) 123 if (pud_set_huge(pud, phys_addr + addr, prot))
124 continue; 124 continue;
125 } 125 }