author	Borislav Petkov <bp@suse.de>	2013-10-31 12:25:03 -0400
committer	Matt Fleming <matt.fleming@intel.com>	2013-11-02 07:09:20 -0400
commit	f900a4b8ab0f462d89a9fcb6173cac1403415b16 (patch)
tree	727be6e48b2fe3cfd33671eb1941b122bef647ed /arch/x86/mm
parent	4b23538d88c87d9c693ad87c8c808e92a505a6e6 (diff)
x86/mm/pageattr: Add a PMD pagetable populating function
Handle PMD-level mappings the same as PUD ones.

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
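[Editor's note: populate_pmd() below returns the number of pages it populated, or -1 when a page-table page could not be allocated. A sketch of the expected call pattern from populate_pud() (an illustration of that return convention, not code copied from the tree at this commit):

	/* hypothetical caller, mirroring populate_pmd()'s return convention */
	ret = populate_pmd(cpa, start, end, cur_pages, pud, pgprot);
	if (ret < 0)
		return ret;		/* PTE/PMD page allocation failed */
	cur_pages += ret;		/* advance past the populated range */
]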
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/pageattr.c	82
1 file changed, 81 insertions(+), 1 deletion(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 81deca77b871..968398b023c0 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -666,6 +666,16 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	return 0;
 }
 
+static int alloc_pte_page(pmd_t *pmd)
+{
+	pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+	if (!pte)
+		return -1;
+
+	set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
+	return 0;
+}
+
 static int alloc_pmd_page(pud_t *pud)
 {
 	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
@@ -676,7 +686,77 @@ static int alloc_pmd_page(pud_t *pud)
 	return 0;
 }
 
-#define populate_pmd(cpa, start, end, pages, pud, pgprot)	(-1)
+#define populate_pte(cpa, start, end, pages, pmd, pgprot)	do {} while (0)
+
+static int populate_pmd(struct cpa_data *cpa,
+			unsigned long start, unsigned long end,
+			unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+{
+	unsigned int cur_pages = 0;
+	pmd_t *pmd;
+
+	/*
+	 * Not on a 2M boundary?
+	 */
+	if (start & (PMD_SIZE - 1)) {
+		unsigned long pre_end = start + (num_pages << PAGE_SHIFT);
+		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
+
+		pre_end   = min_t(unsigned long, pre_end, next_page);
+		cur_pages = (pre_end - start) >> PAGE_SHIFT;
+		cur_pages = min_t(unsigned int, num_pages, cur_pages);
+
+		/*
+		 * Need a PTE page?
+		 */
+		pmd = pmd_offset(pud, start);
+		if (pmd_none(*pmd))
+			if (alloc_pte_page(pmd))
+				return -1;
+
+		populate_pte(cpa, start, pre_end, cur_pages, pmd, pgprot);
+
+		start = pre_end;
+	}
+
+	/*
+	 * We mapped them all?
+	 */
+	if (num_pages == cur_pages)
+		return cur_pages;
+
+	while (end - start >= PMD_SIZE) {
+
+		/*
+		 * We cannot use a 1G page so allocate a PMD page if needed.
+		 */
+		if (pud_none(*pud))
+			if (alloc_pmd_page(pud))
+				return -1;
+
+		pmd = pmd_offset(pud, start);
+
+		set_pmd(pmd, __pmd(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+
+		start	  += PMD_SIZE;
+		cpa->pfn  += PMD_SIZE;
+		cur_pages += PMD_SIZE >> PAGE_SHIFT;
+	}
+
+	/*
+	 * Map trailing 4K pages.
+	 */
+	if (start < end) {
+		pmd = pmd_offset(pud, start);
+		if (pmd_none(*pmd))
+			if (alloc_pte_page(pmd))
+				return -1;
+
+		populate_pte(cpa, start, end, num_pages - cur_pages,
+			     pmd, pgprot);
+	}
+	return num_pages;
+}
+
 static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 		   pgprot_t pgprot)
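
[Editor's note: populate_pmd() splits [start, end) into a head of 4K pages up to the first 2M boundary, a body of whole 2M mappings, and a 4K tail. A minimal user-space sketch (not kernel code; PAGE_SHIFT/PMD_SIZE are redefined locally to mirror x86-64's 4K pages and 2M PMDs) that reproduces only that range-splitting arithmetic:

	#include <stdio.h>

	#define PAGE_SHIFT	12			/* 4K pages, as on x86-64 */
	#define PMD_SHIFT	21			/* 2M PMD mappings */
	#define PMD_SIZE	(1UL << PMD_SHIFT)
	#define PMD_MASK	(~(PMD_SIZE - 1))

	int main(void)
	{
		unsigned long start = 0x1ff000;	/* one 4K page below a 2M boundary */
		unsigned long num_pages = 1200;
		unsigned long end = start + (num_pages << PAGE_SHIFT);
		unsigned long cur_pages = 0, body = 0;

		/* Head: 4K pages up to the next 2M boundary (or to end, if closer). */
		if (start & (PMD_SIZE - 1)) {
			unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
			unsigned long pre_end = end < next_page ? end : next_page;

			cur_pages = (pre_end - start) >> PAGE_SHIFT;
			printf("head: %lu x 4K up to %#lx\n", cur_pages, pre_end);
			start = pre_end;
		}

		/* Body: whole 2M mappings, each worth 512 4K pages. */
		while (end - start >= PMD_SIZE) {
			start += PMD_SIZE;
			body += PMD_SIZE >> PAGE_SHIFT;
		}
		cur_pages += body;
		printf("body: %lu x 4K worth of 2M mappings\n", body);

		/* Tail: whatever remains is mapped with 4K pages again. */
		if (start < end)
			printf("tail: %lu x 4K\n", (end - start) >> PAGE_SHIFT);

		printf("total: %lu pages\n",
		       cur_pages + ((end - start) >> PAGE_SHIFT));
		return 0;
	}

With these inputs the head is 1 page, the body two 2M mappings (1024 pages) and the tail 175 pages, adding up to the full 1200.]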