author	Tom Lendacky <thomas.lendacky@amd.com>	2018-01-10 14:26:16 -0500
committer	Ingo Molnar <mingo@kernel.org>	2018-01-15 19:50:58 -0500
commit	2b5d00b6c2cdd94f6d6a494a6f6c0c0fc7b8e711 (patch)
tree	abfb1332c04ee4ebf69c8759e61fea0827d653fe
parent	bacf6b499e11760aef73a3bb5ce4e5eea74a3fd4 (diff)
x86/mm: Centralize PMD flags in sme_encrypt_kernel()
In preparation for encrypting more than just the kernel during early
boot processing, centralize the use of the PMD flag settings based
on the type of mapping desired.  When 4KB aligned encryption is added,
this will allow either PTE flags or large page PMD flags to be used
without requiring the caller to adjust.

Tested-by: Gabriel Craciunescu <nix.or.die@gmail.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20180110192615.6026.14767.stgit@tlendack-t1.amdoffice.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
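For readers skimming the change: the net effect is that call sites no longer
compute PMD flags themselves; they fill in the range fields of
struct sme_populate_pgd_data and pick a mapping type. A condensed
illustration of the new call pattern, using only names introduced by this
patch (kernel_start/kernel_end/decrypted_base are the locals already
computed in sme_encrypt_kernel()):

	/*
	 * Condensed from the patch body: describe the range once, then
	 * pick the mapping type; __sme_map_range() steps through it in
	 * PMD_PAGE_SIZE increments and applies the right PMD flags.
	 */
	ppd.paddr     = kernel_start;
	ppd.vaddr     = kernel_start;
	ppd.vaddr_end = kernel_end;
	sme_map_range_encrypted(&ppd);		/* PMD_FLAGS_ENC */

	ppd.paddr     = kernel_start;
	ppd.vaddr     = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_map_range_decrypted_wp(&ppd);	/* PMD_FLAGS_DEC_WP */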
-rw-r--r--	arch/x86/mm/mem_encrypt.c	133
1 file changed, 77 insertions(+), 56 deletions(-)
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 5a20696c5440..35f38caa1fa3 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -468,31 +468,39 @@ struct sme_populate_pgd_data {
 	void	*pgtable_area;
 	pgd_t	*pgd;
 
-	pmdval_t pmd_val;
+	pmdval_t pmd_flags;
+	unsigned long paddr;
+
 	unsigned long vaddr;
+	unsigned long vaddr_end;
 };
 
-static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start,
-				 unsigned long end)
+static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
 {
 	unsigned long pgd_start, pgd_end, pgd_size;
 	pgd_t *pgd_p;
 
-	pgd_start = start & PGDIR_MASK;
-	pgd_end = end & PGDIR_MASK;
+	pgd_start = ppd->vaddr & PGDIR_MASK;
+	pgd_end = ppd->vaddr_end & PGDIR_MASK;
 
-	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1);
-	pgd_size *= sizeof(pgd_t);
+	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);
 
-	pgd_p = pgd_base + pgd_index(start);
+	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
 
 	memset(pgd_p, 0, pgd_size);
 }
 
 #define PGD_FLAGS	_KERNPG_TABLE_NOENC
 #define P4D_FLAGS	_KERNPG_TABLE_NOENC
 #define PUD_FLAGS	_KERNPG_TABLE_NOENC
-#define PMD_FLAGS	(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
+
+#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
+
+#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
+#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
+				 (_PAGE_PAT | _PAGE_PWT))
+
+#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)
 
 static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
 {
@@ -561,7 +569,35 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
 
 	pmd_p += pmd_index(ppd->vaddr);
 	if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
-		native_set_pmd(pmd_p, native_make_pmd(ppd->pmd_val));
+		native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
+}
+
+static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
+				   pmdval_t pmd_flags)
+{
+	ppd->pmd_flags = pmd_flags;
+
+	while (ppd->vaddr < ppd->vaddr_end) {
+		sme_populate_pgd_large(ppd);
+
+		ppd->vaddr += PMD_PAGE_SIZE;
+		ppd->paddr += PMD_PAGE_SIZE;
+	}
+}
+
+static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
+{
+	__sme_map_range(ppd, PMD_FLAGS_ENC);
+}
+
+static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
+{
+	__sme_map_range(ppd, PMD_FLAGS_DEC);
+}
+
+static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
+{
+	__sme_map_range(ppd, PMD_FLAGS_DEC_WP);
 }
 
 static unsigned long __init sme_pgtable_calc(unsigned long len)
@@ -621,7 +657,6 @@ void __init sme_encrypt_kernel(void)
 	unsigned long kernel_start, kernel_end, kernel_len;
 	struct sme_populate_pgd_data ppd;
 	unsigned long pgtable_area_len;
-	unsigned long paddr, pmd_flags;
 	unsigned long decrypted_base;
 
 	if (!sme_active())
@@ -693,14 +728,10 @@ void __init sme_encrypt_kernel(void)
 	 * addressing the workarea.
 	 */
 	ppd.pgd = (pgd_t *)native_read_cr3_pa();
-	paddr = workarea_start;
-	while (paddr < workarea_end) {
-		ppd.pmd_val = paddr + PMD_FLAGS;
-		ppd.vaddr = paddr;
-		sme_populate_pgd_large(&ppd);
-
-		paddr += PMD_PAGE_SIZE;
-	}
+	ppd.paddr = workarea_start;
+	ppd.vaddr = workarea_start;
+	ppd.vaddr_end = workarea_end;
+	sme_map_range_decrypted(&ppd);
 
 	/* Flush the TLB - no globals so cr3 is enough */
 	native_write_cr3(__native_read_cr3());
@@ -715,17 +746,6 @@ void __init sme_encrypt_kernel(void)
 	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
 	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;
 
-	/* Add encrypted kernel (identity) mappings */
-	pmd_flags = PMD_FLAGS | _PAGE_ENC;
-	paddr = kernel_start;
-	while (paddr < kernel_end) {
-		ppd.pmd_val = paddr + pmd_flags;
-		ppd.vaddr = paddr;
-		sme_populate_pgd_large(&ppd);
-
-		paddr += PMD_PAGE_SIZE;
-	}
-
 	/*
 	 * A different PGD index/entry must be used to get different
 	 * pagetable entries for the decrypted mapping. Choose the next
@@ -735,29 +755,28 @@ void __init sme_encrypt_kernel(void)
 	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
 	decrypted_base <<= PGDIR_SHIFT;
 
+	/* Add encrypted kernel (identity) mappings */
+	ppd.paddr = kernel_start;
+	ppd.vaddr = kernel_start;
+	ppd.vaddr_end = kernel_end;
+	sme_map_range_encrypted(&ppd);
+
 	/* Add decrypted, write-protected kernel (non-identity) mappings */
-	pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT);
-	paddr = kernel_start;
-	while (paddr < kernel_end) {
-		ppd.pmd_val = paddr + pmd_flags;
-		ppd.vaddr = paddr + decrypted_base;
-		sme_populate_pgd_large(&ppd);
-
-		paddr += PMD_PAGE_SIZE;
-	}
+	ppd.paddr = kernel_start;
+	ppd.vaddr = kernel_start + decrypted_base;
+	ppd.vaddr_end = kernel_end + decrypted_base;
+	sme_map_range_decrypted_wp(&ppd);
 
 	/* Add decrypted workarea mappings to both kernel mappings */
-	paddr = workarea_start;
-	while (paddr < workarea_end) {
-		ppd.pmd_val = paddr + PMD_FLAGS;
-		ppd.vaddr = paddr;
-		sme_populate_pgd_large(&ppd);
-
-		ppd.vaddr = paddr + decrypted_base;
-		sme_populate_pgd_large(&ppd);
-
-		paddr += PMD_PAGE_SIZE;
-	}
+	ppd.paddr = workarea_start;
+	ppd.vaddr = workarea_start;
+	ppd.vaddr_end = workarea_end;
+	sme_map_range_decrypted(&ppd);
+
+	ppd.paddr = workarea_start;
+	ppd.vaddr = workarea_start + decrypted_base;
+	ppd.vaddr_end = workarea_end + decrypted_base;
+	sme_map_range_decrypted(&ppd);
 
 	/* Perform the encryption */
 	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
@@ -768,11 +787,13 @@ void __init sme_encrypt_kernel(void)
 	 * the decrypted areas - all that is needed for this is to remove
 	 * the PGD entry/entries.
	 */
-	sme_clear_pgd(ppd.pgd, kernel_start + decrypted_base,
-		      kernel_end + decrypted_base);
+	ppd.vaddr = kernel_start + decrypted_base;
+	ppd.vaddr_end = kernel_end + decrypted_base;
+	sme_clear_pgd(&ppd);
 
-	sme_clear_pgd(ppd.pgd, workarea_start + decrypted_base,
-		      workarea_end + decrypted_base);
+	ppd.vaddr = workarea_start + decrypted_base;
+	ppd.vaddr_end = workarea_end + decrypted_base;
+	sme_clear_pgd(&ppd);
 
 	/* Flush the TLB - no globals so cr3 is enough */
 	native_write_cr3(__native_read_cr3());
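
As the changelog notes, the point of routing everything through
__sme_map_range() is that a later patch can add 4KB-aligned encryption
without touching the callers. One plausible shape for that extension,
sketched purely for illustration (the pte_flags field and any PTE-level
counterpart of sme_populate_pgd_large() are assumptions, not part of
this commit):

	/* Illustrative only; names below are assumed, not from this patch. */
	struct sme_populate_pgd_data {
		void	*pgtable_area;
		pgd_t	*pgd;

		pmdval_t pmd_flags;
		pteval_t pte_flags;	/* assumed: PTE flags for 4KB mappings */
		unsigned long paddr;

		unsigned long vaddr;
		unsigned long vaddr_end;
	};

With something like this in place, __sme_map_range() could map the
unaligned head and tail of a range with 4KB PTEs and keep 2MB PMDs for
the aligned middle, leaving the sme_map_range_*() callers unchanged.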