author		Borislav Petkov <bp@suse.de>	2013-10-31 12:25:02 -0400
committer	Matt Fleming <matt.fleming@intel.com>	2013-11-02 07:09:17 -0400
commit		4b23538d88c87d9c693ad87c8c808e92a505a6e6 (patch)
tree		bb894535dd837d8c6529a6b42af92c882b4d20a9 /arch/x86/mm
parent		f3f729661e8db476ac427a97de015307aebb7404 (diff)
x86/mm/pageattr: Add a PUD pagetable populating function
Add the next level of the pagetable populating function: we handle
chunks around a 1G boundary by mapping them with the lower-level
functions; otherwise we use 1G pages for the mappings, thus using as
few pagetable pages as possible.
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
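The head/body/tail walk the message describes can be modeled in isolation.
Below is a minimal userspace sketch of the same arithmetic, not the kernel
code itself; the helper split_range() and the sample range are invented for
illustration:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PUD_SHIFT  30				/* 1G pages on x86-64 */
#define PUD_SIZE   (1ULL << PUD_SHIFT)
#define PUD_MASK   (~(PUD_SIZE - 1))

/*
 * Hypothetical helper: report how many 4K pages of a range fall
 * before the first 1G boundary (head), how many whole 1G pages
 * follow (body), and how many 4K pages trail (tail); the same
 * decomposition that populate_pud() performs.
 */
static void split_range(unsigned long long start, unsigned long long numpages)
{
	unsigned long long end = start + (numpages << PAGE_SHIFT);
	unsigned long long head = 0, body = 0, tail;

	/* Head: partial chunk up to the next 1G boundary, if any. */
	if (start & (PUD_SIZE - 1)) {
		unsigned long long next_page = (start + PUD_SIZE) & PUD_MASK;
		unsigned long long pre_end = next_page < end ? next_page : end;

		head  = (pre_end - start) >> PAGE_SHIFT;
		start = pre_end;
	}

	/* Body: whole gigabytes, each a single 1G leaf mapping. */
	while (end - start >= PUD_SIZE) {
		body++;
		start += PUD_SIZE;
	}

	/* Tail: leftover, mapped with smaller pages again. */
	tail = (end - start) >> PAGE_SHIFT;

	printf("head %llu pages, body %llu x 1G, tail %llu pages\n",
	       head, body, tail);
}

int main(void)
{
	/* Start one 4K page below a 1G boundary; span 3G plus three pages. */
	split_range(PUD_SIZE - 0x1000, (3ULL << (PUD_SHIFT - PAGE_SHIFT)) + 3);
	return 0;
}

For that sample range the sketch prints "head 1 pages, body 3 x 1G, tail 2
pages"; counting each 1G body chunk as 262144 pages, the three parts sum
back to numpages.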
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--	arch/x86/mm/pageattr.c	87
1 file changed, 86 insertions(+), 1 deletion(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4b47ae0602e1..81deca77b871 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -666,7 +666,92 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	return 0;
 }
 
-#define populate_pud(cpa, addr, pgd, pgprot)	(-1)
+static int alloc_pmd_page(pud_t *pud)
+{
+	pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+	if (!pmd)
+		return -1;
+
+	set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
+	return 0;
+}
+
+#define populate_pmd(cpa, start, end, pages, pud, pgprot)	(-1)
+
+static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
+			pgprot_t pgprot)
+{
+	pud_t *pud;
+	unsigned long end;
+	int cur_pages = 0;
+
+	end = start + (cpa->numpages << PAGE_SHIFT);
+
+	/*
+	 * Not on a Gb page boundary? => map everything up to it with
+	 * smaller pages.
+	 */
+	if (start & (PUD_SIZE - 1)) {
+		unsigned long pre_end;
+		unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;
+
+		pre_end   = min_t(unsigned long, end, next_page);
+		cur_pages = (pre_end - start) >> PAGE_SHIFT;
+		cur_pages = min_t(int, (int)cpa->numpages, cur_pages);
+
+		pud = pud_offset(pgd, start);
+
+		/*
+		 * Need a PMD page?
+		 */
+		if (pud_none(*pud))
+			if (alloc_pmd_page(pud))
+				return -1;
+
+		cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
+					 pud, pgprot);
+		if (cur_pages < 0)
+			return cur_pages;
+
+		start = pre_end;
+	}
+
+	/* We mapped them all? */
+	if (cpa->numpages == cur_pages)
+		return cur_pages;
+
+	pud = pud_offset(pgd, start);
+
+	/*
+	 * Map everything starting from the Gb boundary, possibly with 1G pages
+	 */
+	while (end - start >= PUD_SIZE) {
+		set_pud(pud, __pud(cpa->pfn | _PAGE_PSE | massage_pgprot(pgprot)));
+
+		start	  += PUD_SIZE;
+		cpa->pfn  += PUD_SIZE;
+		cur_pages += PUD_SIZE >> PAGE_SHIFT;
+		pud++;
+	}
+
+	/* Map trailing leftover */
+	if (start < end) {
+		int tmp;
+
+		pud = pud_offset(pgd, start);
+		if (pud_none(*pud))
+			if (alloc_pmd_page(pud))
+				return -1;
+
+		tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
+				   pud, pgprot);
+		if (tmp < 0)
+			return cur_pages;
+
+		cur_pages += tmp;
+	}
+	return cur_pages;
+}
 
 /*
  * Restrictions for kernel page table do not necessarily apply when mapping in
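One detail worth calling out in the body loop: the entry written by
set_pud() carries _PAGE_PSE, which makes the CPU treat the PUD entry as a
terminal 1G translation instead of a pointer to a PMD page. A standalone
model of that encoding, assuming the standard x86 bit positions (present =
bit 0, RW = bit 1, PSE = bit 7) and with make_pud_leaf() invented for
illustration:

#include <stdint.h>
#include <stdio.h>

/* Standard x86 pagetable attribute bits (low bits of an entry). */
#define XPG_PRESENT (1ULL << 0)
#define XPG_RW      (1ULL << 1)
#define XPG_PSE     (1ULL << 7)	/* entry is a leaf (huge) mapping */

/*
 * Hypothetical model of the value set_pud() installs above: a
 * 1G-aligned physical address in the high bits, attribute bits in
 * the low bits. PSE tells the MMU to stop walking here and use the
 * entry as a 1G translation rather than a pointer to a PMD page.
 */
static uint64_t make_pud_leaf(uint64_t paddr, uint64_t prot)
{
	return paddr | XPG_PSE | prot;
}

int main(void)
{
	/* Map the gigabyte at physical 3G, read-write. */
	uint64_t e = make_pud_leaf(3ULL << 30, XPG_PRESENT | XPG_RW);

	printf("pud leaf: %#llx\n", (unsigned long long)e);	/* 0xc0000083 */
	return 0;
}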