author		Borislav Petkov <bp@suse.de>	2013-10-31 12:25:05 -0400
committer	Matt Fleming <matt.fleming@intel.com>	2013-11-02 07:09:28 -0400
commit		0bb8aeee7b73b21e09d3ea12f2120d974f70b669
tree		e25ffc5c37466f056d7818e0f73caed0ec32c9d3 /arch/x86/mm
parent		c6b6f363f7b24aa448994e3a65c4d5b3116acfcc
x86/mm/pageattr: Add a PUD error unwinding path
In case we encounter an error during the mapping of a region, we want to
unwind what we've established so far, exactly the way we did the mapping.
This is the PUD part, kept deliberately small for easier review.

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
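The unmap path mirrors the walk that established the mapping: peel off any
head that is not 1G-aligned, clear whole PUD-sized chunks, then hand the 2M
leftovers down to the PMD level. As a rough illustration of just that range
arithmetic, here is a minimal userspace sketch (not kernel code; PUD_SHIFT is
hardcoded to the x86-64 value and walk() is a hypothetical stand-in):

#include <stdio.h>

#define PUD_SHIFT	30
#define PUD_SIZE	(1UL << PUD_SHIFT)	/* 1 GiB */
#define PUD_MASK	(~(PUD_SIZE - 1))

static void walk(unsigned long start, unsigned long end)
{
	/* Head: not on a GB page boundary, stop at the next one. */
	if (start & (PUD_SIZE - 1)) {
		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
		unsigned long pre_end = end < next_page ? end : next_page;

		printf("pmd head: %#lx - %#lx\n", start, pre_end);
		start = pre_end;
	}

	/* Body: full, 1G-aligned chunks. */
	while (end - start >= PUD_SIZE) {
		printf("pud:      %#lx - %#lx\n", start, start + PUD_SIZE);
		start += PUD_SIZE;
	}

	/* Tail: 2M leftovers below the next 1G boundary. */
	if (start < end)
		printf("pmd tail: %#lx - %#lx\n", start, end);
}

int main(void)
{
	/* e.g. an unaligned range spanning several gigabytes */
	walk(0x40200000UL, 0x140400000UL);
	return 0;
}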
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/pageattr.c	60
1 file changed, 58 insertions(+), 2 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 2a1308a8c072..1cbdbbc35b47 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -666,6 +666,51 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	return 0;
 }
 
+#define unmap_pmd_range(pud, start, pre_end)	do {} while (0)
+
+static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
+{
+	pud_t *pud = pud_offset(pgd, start);
+
+	/*
+	 * Not on a GB page boundary?
+	 */
+	if (start & (PUD_SIZE - 1)) {
+		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
+		unsigned long pre_end	= min_t(unsigned long, end, next_page);
+
+		unmap_pmd_range(pud, start, pre_end);
+
+		start = pre_end;
+		pud++;
+	}
+
+	/*
+	 * Try to unmap in 1G chunks?
+	 */
+	while (end - start >= PUD_SIZE) {
+
+		if (pud_large(*pud))
+			pud_clear(pud);
+		else
+			unmap_pmd_range(pud, start, start + PUD_SIZE);
+
+		start += PUD_SIZE;
+		pud++;
+	}
+
+	/*
+	 * 2M leftovers?
+	 */
+	if (start < end)
+		unmap_pmd_range(pud, start, end);
+
+	/*
+	 * No need to try to free the PUD page because we'll free it in
+	 * populate_pgd's error path
+	 */
+}
+
 static int alloc_pte_page(pmd_t *pmd)
 {
 	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
@@ -883,9 +928,20 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 	pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);
 
 	ret = populate_pud(cpa, addr, pgd_entry, pgprot);
-	if (ret < 0)
-		return ret;
+	if (ret < 0) {
+		unmap_pud_range(pgd_entry, addr,
+				addr + (cpa->numpages << PAGE_SHIFT));
 
+		if (allocd_pgd) {
+			/*
+			 * If I allocated this PUD page, I can just as well
+			 * free it in this error path.
+			 */
+			pgd_clear(pgd_entry);
+			free_page((unsigned long)pud);
+		}
+		return ret;
+	}
 	cpa->numpages = ret;
 	return 0;
 }
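The populate_pgd() hunk above follows the usual construct-then-unwind
ordering: undo the deeper level's mappings first, and free the PUD page only
if this call allocated it. For illustration, that shape can be reduced to a
standalone userspace sketch (not kernel code; struct table, populate() and
map_range() are hypothetical stand-ins for populate_pgd()'s actual helpers):

#include <stdio.h>
#include <stdlib.h>

struct table { int entries; };

static int populate(struct table *t)
{
	return -1;	/* simulate a failure partway through the mapping */
}

static int map_range(struct table *t)
{
	int allocd = 0;

	if (!t) {
		t = calloc(1, sizeof(*t));
		if (!t)
			return -1;
		allocd = 1;	/* mirrors allocd_pgd in the hunk above */
	}

	if (populate(t) < 0) {
		t->entries = 0;		/* stands in for unmap_pud_range() */
		if (allocd)
			free(t);	/* pgd_clear() + free_page() */
		return -1;
	}
	/* on success the table stays installed, so it is not freed here */
	return 0;
}

int main(void)
{
	if (map_range(NULL) < 0)
		fprintf(stderr, "mapping failed, unwound cleanly\n");
	return 0;
}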