aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2018-09-17 10:29:14 -0400
committerThomas Gleixner <tglx@linutronix.de>2018-09-27 14:39:39 -0400
commitf61c5ba2885eabc7bc4b0b2f5232f475216ba446 (patch)
treec37d356a5996651e51cc648db4b84293e45eb4d4
parent69c31e69df3d2efc4ad7f53d500fdd920d3865a4 (diff)
x86/mm/cpa: Add sanity check for existing mappings
With the range check it is possible to do a quick verification that the current mapping is correct vs. the static protection areas. In case an incorrect mapping is detected a warning is emitted and the large page is split up. If the large page is a 2M page, then the split code is forced to check the static protections for the PTE entries to fix up the incorrectness. For 1G pages this can't be done easily because that would require finding the offending 2M areas either before the split or afterwards. For now just warn about that case and revisit it when reported. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Dave Hansen <dave.hansen@intel.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Bin Yang <bin.yang@intel.com> Cc: Mark Gross <mark.gross@intel.com> Link: https://lkml.kernel.org/r/20180917143546.331408643@linutronix.de
-rw-r--r--arch/x86/mm/pageattr.c77
1 file changed, 67 insertions, 10 deletions
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 8f9083eb21ac..19781b0ab4b4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -37,12 +37,14 @@ struct cpa_data {
37 unsigned long numpages; 37 unsigned long numpages;
38 int flags; 38 int flags;
39 unsigned long pfn; 39 unsigned long pfn;
40 unsigned force_split : 1; 40 unsigned force_split : 1,
41 force_static_prot : 1;
41 int curpage; 42 int curpage;
42 struct page **pages; 43 struct page **pages;
43}; 44};
44 45
45enum cpa_warn { 46enum cpa_warn {
47 CPA_CONFLICT,
46 CPA_PROTECT, 48 CPA_PROTECT,
47 CPA_DETECT, 49 CPA_DETECT,
48}; 50};
@@ -501,6 +503,7 @@ static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
501 unsigned long pfn, const char *txt) 503 unsigned long pfn, const char *txt)
502{ 504{
503 static const char *lvltxt[] = { 505 static const char *lvltxt[] = {
506 [CPA_CONFLICT] = "conflict",
504 [CPA_PROTECT] = "protect", 507 [CPA_PROTECT] = "protect",
505 [CPA_DETECT] = "detect", 508 [CPA_DETECT] = "detect",
506 }; 509 };
@@ -743,7 +746,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
743 struct cpa_data *cpa) 746 struct cpa_data *cpa)
744{ 747{
745 unsigned long numpages, pmask, psize, lpaddr, addr, pfn, old_pfn; 748 unsigned long numpages, pmask, psize, lpaddr, addr, pfn, old_pfn;
746 pgprot_t old_prot, new_prot, req_prot; 749 pgprot_t old_prot, new_prot, req_prot, chk_prot;
747 pte_t new_pte, old_pte, *tmp; 750 pte_t new_pte, old_pte, *tmp;
748 enum pg_level level; 751 enum pg_level level;
749 int i; 752 int i;
@@ -820,6 +823,23 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
820 numpages = psize >> PAGE_SHIFT; 823 numpages = psize >> PAGE_SHIFT;
821 824
822 /* 825 /*
826 * Sanity check that the existing mapping is correct versus the static
827 * protections. static_protections() guards against !PRESENT, so no
828 * extra conditional required here.
829 */
830 chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
831 CPA_CONFLICT);
832
833 if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
834 /*
835 * Split the large page and tell the split code to
836 * enforce static protections.
837 */
838 cpa->force_static_prot = 1;
839 return 1;
840 }
841
842 /*
823 * Make sure that the requested pgprot does not violate the static 843 * Make sure that the requested pgprot does not violate the static
824 * protections. Check the full large page whether one of the pages 844 * protections. Check the full large page whether one of the pages
825 * in it results in a different pgprot than the first one of the 845 * in it results in a different pgprot than the first one of the
@@ -828,8 +848,8 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
828 new_prot = static_protections(req_prot, address, pfn, 1, CPA_DETECT); 848 new_prot = static_protections(req_prot, address, pfn, 1, CPA_DETECT);
829 pfn = old_pfn; 849 pfn = old_pfn;
830 for (i = 0, addr = lpaddr; i < numpages; i++, addr += PAGE_SIZE, pfn++) { 850 for (i = 0, addr = lpaddr; i < numpages; i++, addr += PAGE_SIZE, pfn++) {
831 pgprot_t chk_prot = static_protections(req_prot, addr, pfn, 1, 851 chk_prot = static_protections(req_prot, addr, pfn, 1,
832 CPA_DETECT); 852 CPA_DETECT);
833 cpa_inc_4k_checked(); 853 cpa_inc_4k_checked();
834 if (pgprot_val(chk_prot) != pgprot_val(new_prot)) 854 if (pgprot_val(chk_prot) != pgprot_val(new_prot))
835 return 1; 855 return 1;
@@ -871,15 +891,50 @@ static int should_split_large_page(pte_t *kpte, unsigned long address,
871 return do_split; 891 return do_split;
872} 892}
873 893
894static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
895 pgprot_t ref_prot, unsigned long address,
896 unsigned long size)
897{
898 unsigned int npg = PFN_DOWN(size);
899 pgprot_t prot;
900
901 /*
902 * If should_split_large_page() discovered an inconsistent mapping,
903 * remove the invalid protection in the split mapping.
904 */
905 if (!cpa->force_static_prot)
906 goto set;
907
908 prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT);
909
910 if (pgprot_val(prot) == pgprot_val(ref_prot))
911 goto set;
912
913 /*
914 * If this is splitting a PMD, fix it up. PUD splits cannot be
915 * fixed trivially as that would require to rescan the newly
916 * installed PMD mappings after returning from split_large_page()
917 * so an eventual further split can allocate the necessary PTE
918 * pages. Warn for now and revisit it in case this actually
919 * happens.
920 */
921 if (size == PAGE_SIZE)
922 ref_prot = prot;
923 else
924 pr_warn_once("CPA: Cannot fixup static protections for PUD split\n");
925set:
926 set_pte(pte, pfn_pte(pfn, ref_prot));
927}
928
874static int 929static int
875__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address, 930__split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
876 struct page *base) 931 struct page *base)
877{ 932{
933 unsigned long lpaddr, lpinc, ref_pfn, pfn, pfninc = 1;
878 pte_t *pbase = (pte_t *)page_address(base); 934 pte_t *pbase = (pte_t *)page_address(base);
879 unsigned long ref_pfn, pfn, pfninc = 1;
880 unsigned int i, level; 935 unsigned int i, level;
881 pte_t *tmp;
882 pgprot_t ref_prot; 936 pgprot_t ref_prot;
937 pte_t *tmp;
883 938
884 spin_lock(&pgd_lock); 939 spin_lock(&pgd_lock);
885 /* 940 /*
@@ -902,15 +957,17 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
902 * PAT bit to correct position. 957 * PAT bit to correct position.
903 */ 958 */
904 ref_prot = pgprot_large_2_4k(ref_prot); 959 ref_prot = pgprot_large_2_4k(ref_prot);
905
906 ref_pfn = pmd_pfn(*(pmd_t *)kpte); 960 ref_pfn = pmd_pfn(*(pmd_t *)kpte);
961 lpaddr = address & PMD_MASK;
962 lpinc = PAGE_SIZE;
907 break; 963 break;
908 964
909 case PG_LEVEL_1G: 965 case PG_LEVEL_1G:
910 ref_prot = pud_pgprot(*(pud_t *)kpte); 966 ref_prot = pud_pgprot(*(pud_t *)kpte);
911 ref_pfn = pud_pfn(*(pud_t *)kpte); 967 ref_pfn = pud_pfn(*(pud_t *)kpte);
912 pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT; 968 pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
913 969 lpaddr = address & PUD_MASK;
970 lpinc = PMD_SIZE;
914 /* 971 /*
915 * Clear the PSE flags if the PRESENT flag is not set 972 * Clear the PSE flags if the PRESENT flag is not set
916 * otherwise pmd_present/pmd_huge will return true 973 * otherwise pmd_present/pmd_huge will return true
@@ -931,8 +988,8 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
931 * Get the target pfn from the original entry: 988 * Get the target pfn from the original entry:
932 */ 989 */
933 pfn = ref_pfn; 990 pfn = ref_pfn;
934 for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc) 991 for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc, lpaddr += lpinc)
935 set_pte(&pbase[i], pfn_pte(pfn, ref_prot)); 992 split_set_pte(cpa, pbase + i, pfn, ref_prot, lpaddr, lpinc);
936 993
937 if (virt_addr_valid(address)) { 994 if (virt_addr_valid(address)) {
938 unsigned long pfn = PFN_DOWN(__pa(address)); 995 unsigned long pfn = PFN_DOWN(__pa(address));