author		Borislav Petkov <bp@suse.de>		2014-01-18 06:48:16 -0500
committer	Matt Fleming <matt.fleming@intel.com>	2014-03-04 11:17:17 -0500
commit		42a5477251f0e0f33ad5f6a95c48d685ec03191e (patch)
tree		616ed2ed4a076090e2ab494c1b1660b8673fe3e1 /arch/x86/mm
parent		11cc851254b4bc3bd4430be8db2a41469303a427 (diff)
x86, pageattr: Export page unmapping interface
We will use it in EFI, so expose it.

Signed-off-by: Borislav Petkov <bp@suse.de>
Tested-by: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
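For context, a minimal hypothetical sketch of how an EFI-side caller might use the interface this patch exports. Only kernel_unmap_pages_in_pgd() comes from the patch below; efi_pgd, EFI_VA_START and EFI_NR_PAGES are illustrative placeholder names, not identifiers from this series.

/*
 * Illustrative only -- not part of this patch. Tears down a temporary
 * mapping in a private page-table hierarchy via the exported interface.
 * efi_pgd, EFI_VA_START and EFI_NR_PAGES are assumed placeholders.
 */
static void efi_teardown_example(pgd_t *efi_pgd)
{
	/*
	 * Unmaps [EFI_VA_START, EFI_VA_START + EFI_NR_PAGES * PAGE_SIZE)
	 * from efi_pgd; the unmap path also frees page-table pages that
	 * end up empty.
	 */
	kernel_unmap_pages_in_pgd(efi_pgd, EFI_VA_START, EFI_NR_PAGES);
}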
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/pageattr.c	44
1 file changed, 31 insertions(+), 13 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index b3b19f46c016..a3488689e301 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -692,6 +692,18 @@ static bool try_to_free_pmd_page(pmd_t *pmd)
 	return true;
 }
 
+static bool try_to_free_pud_page(pud_t *pud)
+{
+	int i;
+
+	for (i = 0; i < PTRS_PER_PUD; i++)
+		if (!pud_none(pud[i]))
+			return false;
+
+	free_page((unsigned long)pud);
+	return true;
+}
+
 static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
 {
 	pte_t *pte = pte_offset_kernel(pmd, start);
@@ -805,6 +817,16 @@ static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
 	 */
 }
 
+static void unmap_pgd_range(pgd_t *root, unsigned long addr, unsigned long end)
+{
+	pgd_t *pgd_entry = root + pgd_index(addr);
+
+	unmap_pud_range(pgd_entry, addr, end);
+
+	if (try_to_free_pud_page((pud_t *)pgd_page_vaddr(*pgd_entry)))
+		pgd_clear(pgd_entry);
+}
+
 static int alloc_pte_page(pmd_t *pmd)
 {
 	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
@@ -999,9 +1021,8 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 {
 	pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
-	bool allocd_pgd = false;
-	pgd_t *pgd_entry;
 	pud_t *pud = NULL;	/* shut up gcc */
+	pgd_t *pgd_entry;
 	int ret;
 
 	pgd_entry = cpa->pgd + pgd_index(addr);
@@ -1015,7 +1036,6 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 			return -1;
 
 		set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
-		allocd_pgd = true;
 	}
 
 	pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
@@ -1023,19 +1043,11 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 
 	ret = populate_pud(cpa, addr, pgd_entry, pgprot);
 	if (ret < 0) {
-		unmap_pud_range(pgd_entry, addr,
+		unmap_pgd_range(cpa->pgd, addr,
 				addr + (cpa->numpages << PAGE_SHIFT));
-
-		if (allocd_pgd) {
-			/*
-			 * If I allocated this PUD page, I can just as well
-			 * free it in this error path.
-			 */
-			pgd_clear(pgd_entry);
-			free_page((unsigned long)pud);
-		}
 		return ret;
 	}
+
 	cpa->numpages = ret;
 	return 0;
 }
@@ -1861,6 +1873,12 @@ out:
 	return retval;
 }
 
+void kernel_unmap_pages_in_pgd(pgd_t *root, unsigned long address,
+			       unsigned numpages)
+{
+	unmap_pgd_range(root, address, address + (numpages << PAGE_SHIFT));
+}
+
 /*
  * The testcases use internal knowledge of the implementation that shouldn't
  * be exposed to the rest of the kernel. Include these directly here.