author    Juergen Gross <jgross@suse.com>        2014-11-03 08:02:02 -0500
committer Thomas Gleixner <tglx@linutronix.de>   2014-11-16 05:04:26 -0500
commit    f5b2831d654167d77da8afbef4d2584897b12d0c (patch)
tree      3370b6a8303fdb8ef1b77e07a08391592e29ffa2 /arch/x86/mm
parent    f439c429c320981943f8b64b2a4049d946cb492b (diff)
x86: Respect PAT bit when copying pte values between large and normal pages
The PAT bit in the ptes is not moved to the correct position when
copying page protection attributes between entries of different sized
pages. Translate the ptes according to their page size.

Based-on-patch-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stefan.bader@canonical.com
Cc: xen-devel@lists.xensource.com
Cc: konrad.wilk@oracle.com
Cc: ville.syrjala@linux.intel.com
Cc: david.vrabel@citrix.com
Cc: jbeulich@suse.com
Cc: toshi.kani@hp.com
Cc: plagnioj@jcrosoft.com
Cc: tomi.valkeinen@ti.com
Cc: bhelgaas@google.com
Link: http://lkml.kernel.org/r/1415019724-4317-17-git-send-email-jgross@suse.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
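As background for the hunks below: in a 4k pte the PAT bit sits at bit 7
(_PAGE_BIT_PAT), while in a 2M or 1G entry bit 7 means PSE and PAT moves
up to bit 12 (_PAGE_BIT_PAT_LARGE). The following standalone sketch
illustrates the translation the patch performs through the kernel's
pgprot_large_2_4k()/pgprot_4k_2_large() helpers; the names
prot_large_to_4k()/prot_4k_to_large() and the simplified macros are this
sketch's own invention, only the bit positions are taken from the x86
architecture.

    /* Minimal userspace sketch of the PAT bit translation; the bit
     * positions match x86, everything else here is illustrative. */
    #include <stdio.h>

    #define PAGE_BIT_PAT        7ULL   /* PAT in a 4k pte */
    #define PAGE_BIT_PAT_LARGE  12ULL  /* PAT in a 2M/1G pte; bit 7 is PSE there */
    #define PAGE_PAT            (1ULL << PAGE_BIT_PAT)
    #define PAGE_PAT_LARGE      (1ULL << PAGE_BIT_PAT_LARGE)

    /* large-page format -> 4k format: move PAT from bit 12 down to bit 7 */
    static unsigned long long prot_large_to_4k(unsigned long long prot)
    {
            unsigned long long pat = prot & PAGE_PAT_LARGE;

            prot &= ~PAGE_PAT_LARGE;
            return prot | (pat >> (PAGE_BIT_PAT_LARGE - PAGE_BIT_PAT));
    }

    /* 4k format -> large-page format: move PAT from bit 7 up to bit 12 */
    static unsigned long long prot_4k_to_large(unsigned long long prot)
    {
            unsigned long long pat = prot & PAGE_PAT;

            prot &= ~PAGE_PAT;
            return prot | (pat << (PAGE_BIT_PAT_LARGE - PAGE_BIT_PAT));
    }

    int main(void)
    {
            /* PAT set in large-page format round-trips through 4k format */
            unsigned long long large = PAGE_PAT_LARGE;

            printf("4k:    %#llx\n", prot_large_to_4k(large));  /* 0x80 */
            printf("large: %#llx\n",
                   prot_4k_to_large(prot_large_to_4k(large)));  /* 0x1000 */
            return 0;
    }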
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--    arch/x86/mm/pageattr.c    33
1 file changed, 23 insertions, 10 deletions
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index de807c9daad1..6c8e3fdaf077 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -485,14 +485,23 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 
 	/*
 	 * We are safe now. Check whether the new pgprot is the same:
+	 * Convert protection attributes to 4k-format, as cpa->mask* are set
+	 * up accordingly.
 	 */
 	old_pte = *kpte;
-	old_prot = req_prot = pte_pgprot(old_pte);
+	old_prot = req_prot = pgprot_large_2_4k(pte_pgprot(old_pte));
 
 	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
 	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
 
 	/*
+	 * req_prot is in format of 4k pages. It must be converted to large
+	 * page format: the caching mode includes the PAT bit located at
+	 * different bit positions in the two formats.
+	 */
+	req_prot = pgprot_4k_2_large(req_prot);
+
+	/*
 	 * Set the PSE and GLOBAL flags only if the PRESENT flag is
 	 * set otherwise pmd_present/pmd_huge will return true even on
 	 * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
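Why does the hunk above convert before applying cpa->mask_clr and
cpa->mask_set? Those masks are built in 4k format, where bit 7 is PAT;
in large-page format the very same bit is PSE. A toy continuation of
the sketch above (same made-up helpers and macros) makes the failure
mode visible:

    /* large-page-format value with PAT set (bit 12) */
    unsigned long long prot = PAGE_PAT_LARGE;

    /* correct: translate to 4k format first, then apply the 4k mask */
    unsigned long long ok = prot_large_to_4k(prot) & ~PAGE_PAT;  /* -> 0x0 */

    /* wrong: a 4k-format mask applied to an untranslated value misses
     * PAT at bit 12 -- and on a real large-page entry bit 7 would be
     * PSE, so the mask would clobber the page-size flag instead */
    unsigned long long bad = prot & ~PAGE_PAT;                   /* -> 0x1000 */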
@@ -585,13 +594,10 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 
 	paravirt_alloc_pte(&init_mm, page_to_pfn(base));
 	ref_prot = pte_pgprot(pte_clrhuge(*kpte));
-	/*
-	 * If we ever want to utilize the PAT bit, we need to
-	 * update this function to make sure it's converted from
-	 * bit 12 to bit 7 when we cross from the 2MB level to
-	 * the 4K level:
-	 */
-	WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);
+
+	/* promote PAT bit to correct position */
+	if (level == PG_LEVEL_2M)
+		ref_prot = pgprot_large_2_4k(ref_prot);
 
 #ifdef CONFIG_X86_64
 	if (level == PG_LEVEL_1G) {
@@ -879,6 +885,7 @@ static int populate_pmd(struct cpa_data *cpa,
879{ 885{
880 unsigned int cur_pages = 0; 886 unsigned int cur_pages = 0;
881 pmd_t *pmd; 887 pmd_t *pmd;
888 pgprot_t pmd_pgprot;
882 889
883 /* 890 /*
884 * Not on a 2M boundary? 891 * Not on a 2M boundary?
@@ -910,6 +917,8 @@ static int populate_pmd(struct cpa_data *cpa,
 	if (num_pages == cur_pages)
 		return cur_pages;
 
+	pmd_pgprot = pgprot_4k_2_large(pgprot);
+
 	while (end - start >= PMD_SIZE) {
 
 		/*
@@ -921,7 +930,8 @@ static int populate_pmd(struct cpa_data *cpa,
 
 		pmd = pmd_offset(pud, start);
 
-		set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+		set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE |
+				   massage_pgprot(pmd_pgprot)));
 
 		start += PMD_SIZE;
 		cpa->pfn += PMD_SIZE;
@@ -949,6 +959,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 	pud_t *pud;
 	unsigned long end;
 	int cur_pages = 0;
+	pgprot_t pud_pgprot;
 
 	end = start + (cpa->numpages << PAGE_SHIFT);
 
@@ -986,12 +997,14 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 		return cur_pages;
 
 	pud = pud_offset(pgd, start);
+	pud_pgprot = pgprot_4k_2_large(pgprot);
 
 	/*
 	 * Map everything starting from the Gb boundary, possibly with 1G pages
 	 */
 	while (end - start >= PUD_SIZE) {
-		set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+		set_pud(pud, __pud(cpa->pfn | _PAGE_PSE |
+				   massage_pgprot(pud_pgprot)));
 
 		start += PUD_SIZE;
 		cpa->pfn += PUD_SIZE;