 arch/x86/mm/mem_encrypt.c      | 123 ++++++++++++++++++++++++++++++++++------
 arch/x86/mm/mem_encrypt_boot.S |  20 +++++---
 2 files changed, 121 insertions(+), 22 deletions(-)
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 35f38caa1fa3..e74a1722d438 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -469,6 +469,7 @@ struct sme_populate_pgd_data {
 	pgd_t	*pgd;
 
 	pmdval_t pmd_flags;
+	pteval_t pte_flags;
 	unsigned long paddr;
 
 	unsigned long vaddr;
@@ -493,6 +494,7 @@ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
 #define PGD_FLAGS		_KERNPG_TABLE_NOENC
 #define P4D_FLAGS		_KERNPG_TABLE_NOENC
 #define PUD_FLAGS		_KERNPG_TABLE_NOENC
+#define PMD_FLAGS		_KERNPG_TABLE_NOENC
 
 #define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
 
@@ -502,7 +504,15 @@ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
 
 #define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)
 
-static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
+
+#define PTE_FLAGS_DEC		PTE_FLAGS
+#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
+				 (_PAGE_PAT | _PAGE_PWT))
+
+#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)
+
+static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
 {
 	pgd_t *pgd_p;
 	p4d_t *p4d_p;
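Note: the new PTE_FLAGS_* macros mirror the existing PMD_FLAGS_* variants at 4KB granularity. PTE_FLAGS_DEC_WP drops the cache-attribute bits and sets _PAGE_PAT | _PAGE_PWT, which, with _PAGE_PCD clear, selects PAT entry 5, the entry that __enc_copy temporarily reprograms to write-protect (hence the PAT save/restore in the mem_encrypt_boot.S hunks below). A minimal userspace sketch of the derivation, using stand-in constants; the bit positions are the architectural 4KB-PTE ones, nothing here is taken from kernel headers:

#include <stdint.h>
#include <stdio.h>

#define X_PAGE_PWT	(1ULL << 3)	/* stand-in for _PAGE_PWT */
#define X_PAGE_PCD	(1ULL << 4)	/* stand-in for _PAGE_PCD */
#define X_PAGE_PAT	(1ULL << 7)	/* stand-in for _PAGE_PAT (4KB PTE) */
#define X_PAGE_CACHE_MASK	(X_PAGE_PWT | X_PAGE_PCD | X_PAGE_PAT)

int main(void)
{
	/* hypothetical base PTE flags: present, writable, accessed, dirty */
	uint64_t pte_flags = 0x63;

	/* same shape as PTE_FLAGS_DEC_WP: drop cache bits, set PAT|PWT */
	uint64_t wp = (pte_flags & ~X_PAGE_CACHE_MASK) | (X_PAGE_PAT | X_PAGE_PWT);

	/* PAT index = PAT<<2 | PCD<<1 | PWT, so this resolves to entry 5 */
	unsigned int idx = ((wp & X_PAGE_PAT) ? 4 : 0) |
			   ((wp & X_PAGE_PCD) ? 2 : 0) |
			   ((wp & X_PAGE_PWT) ? 1 : 0);

	printf("flags %#llx select PAT entry %u\n",
	       (unsigned long long)wp, idx);	/* prints entry 5 */
	return 0;
}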
@@ -553,7 +563,7 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
 	pud_p += pud_index(ppd->vaddr);
 	if (native_pud_val(*pud_p)) {
 		if (native_pud_val(*pud_p) & _PAGE_PSE)
-			return;
+			return NULL;
 
 		pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
 	} else {
@@ -567,16 +577,55 @@ static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
 		native_set_pud(pud_p, pud);
 	}
 
+	return pmd_p;
+}
+
+static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
+{
+	pmd_t *pmd_p;
+
+	pmd_p = sme_prepare_pgd(ppd);
+	if (!pmd_p)
+		return;
+
 	pmd_p += pmd_index(ppd->vaddr);
 	if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
 		native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
 }
 
-static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
-				   pmdval_t pmd_flags)
+static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
 {
-	ppd->pmd_flags = pmd_flags;
+	pmd_t *pmd_p;
+	pte_t *pte_p;
+
+	pmd_p = sme_prepare_pgd(ppd);
+	if (!pmd_p)
+		return;
+
+	pmd_p += pmd_index(ppd->vaddr);
+	if (native_pmd_val(*pmd_p)) {
+		if (native_pmd_val(*pmd_p) & _PAGE_PSE)
+			return;
+
+		pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK);
+	} else {
+		pmd_t pmd;
 
+		pte_p = ppd->pgtable_area;
+		memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE);
+		ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE;
+
+		pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS);
+		native_set_pmd(pmd_p, pmd);
+	}
+
+	pte_p += pte_index(ppd->vaddr);
+	if (!native_pte_val(*pte_p))
+		native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags));
+}
+
+static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
+{
 	while (ppd->vaddr < ppd->vaddr_end) {
 		sme_populate_pgd_large(ppd);
 
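Note: in sme_populate_pgd(), a non-large PMD entry holds the address of a page of PTEs plus flag bits, so masking with ~PTE_FLAGS_MASK recovers the table pointer (usable directly here because these early tables live in identity-mapped memory); when no entry exists yet, a zeroed PTE page is carved out of ppd->pgtable_area. A toy sketch of the mask step, with a made-up mask value and addresses:

#include <stdint.h>
#include <stdio.h>

/* made-up stand-in for the kernel's PTE_FLAGS_MASK: the low flag
 * bits plus the bits above the maximum physical address (incl. NX) */
#define X_PTE_FLAGS_MASK	0xfff0000000000fffULL

int main(void)
{
	uint64_t pte_page = 0x12345000;		/* hypothetical PTE-page address */
	uint64_t pmd_val  = pte_page | 0x63;	/* entry = table address | flags */

	/* same shape as: (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK) */
	uint64_t table = pmd_val & ~X_PTE_FLAGS_MASK;

	printf("entry %#llx -> table %#llx\n",	/* recovers 0x12345000 */
	       (unsigned long long)pmd_val, (unsigned long long)table);
	return 0;
}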
@@ -585,33 +634,71 @@ static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
 	}
 }
 
+static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
+{
+	while (ppd->vaddr < ppd->vaddr_end) {
+		sme_populate_pgd(ppd);
+
+		ppd->vaddr += PAGE_SIZE;
+		ppd->paddr += PAGE_SIZE;
+	}
+}
+
+static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
+				   pmdval_t pmd_flags, pteval_t pte_flags)
+{
+	unsigned long vaddr_end;
+
+	ppd->pmd_flags = pmd_flags;
+	ppd->pte_flags = pte_flags;
+
+	/* Save original end value since we modify the struct value */
+	vaddr_end = ppd->vaddr_end;
+
+	/* If start is not 2MB aligned, create PTE entries */
+	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
+	__sme_map_range_pte(ppd);
+
+	/* Create PMD entries */
+	ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
+	__sme_map_range_pmd(ppd);
+
+	/* If end is not 2MB aligned, create PTE entries */
+	ppd->vaddr_end = vaddr_end;
+	__sme_map_range_pte(ppd);
+}
+
 static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
 {
-	__sme_map_range(ppd, PMD_FLAGS_ENC);
+	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
 }
 
 static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
 {
-	__sme_map_range(ppd, PMD_FLAGS_DEC);
+	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
 }
 
 static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
 {
-	__sme_map_range(ppd, PMD_FLAGS_DEC_WP);
+	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
 }
 
 static unsigned long __init sme_pgtable_calc(unsigned long len)
 {
-	unsigned long p4d_size, pud_size, pmd_size;
+	unsigned long p4d_size, pud_size, pmd_size, pte_size;
 	unsigned long total;
 
 	/*
 	 * Perform a relatively simplistic calculation of the pagetable
-	 * entries that are needed. That mappings will be covered by 2MB
-	 * PMD entries so we can conservatively calculate the required
+	 * entries that are needed. Those mappings will be covered mostly
+	 * by 2MB PMD entries so we can conservatively calculate the required
 	 * number of P4D, PUD and PMD structures needed to perform the
-	 * mappings. Incrementing the count for each covers the case where
-	 * the addresses cross entries.
+	 * mappings. For mappings that are not 2MB aligned, PTE mappings
+	 * would be needed for the start and end portion of the address range
+	 * that fall outside of the 2MB alignment. This results in, at most,
+	 * two extra pages to hold PTE entries for each range that is mapped.
+	 * Incrementing the count for each covers the case where the addresses
+	 * cross entries.
 	 */
 	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
 		p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
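Note: __sme_map_range() now maps a range in three phases: 4KB PTE entries up to the first 2MB boundary, 2MB PMD entries for the aligned middle, and 4KB PTE entries for any unaligned tail. The following standalone sketch reproduces the same ALIGN/mask arithmetic with made-up addresses (a walk-through, not kernel code):

#include <stdint.h>
#include <stdio.h>

#define PMD_PAGE_SIZE	0x200000UL		/* 2MB */
#define PMD_PAGE_MASK	(~(PMD_PAGE_SIZE - 1))
#define PAGE_SIZE	0x1000UL
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long vaddr = 0x1ff000, vaddr_end = 0x601000;

	unsigned long head_end = ALIGN(vaddr, PMD_PAGE_SIZE);	/* 0x200000 */
	unsigned long body_end = vaddr_end & PMD_PAGE_MASK;	/* 0x600000 */

	printf("PTE head: %#lx-%#lx (%lu pages)\n", vaddr, head_end,
	       (head_end - vaddr) / PAGE_SIZE);
	printf("PMD body: %#lx-%#lx (%lu large pages)\n", head_end, body_end,
	       (body_end - head_end) / PMD_PAGE_SIZE);
	printf("PTE tail: %#lx-%#lx (%lu pages)\n", body_end, vaddr_end,
	       (vaddr_end - body_end) / PAGE_SIZE);
	return 0;
}

For the range 0x1ff000-0x601000 this reports one head page, two 2MB large pages and one tail page.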
@@ -625,8 +712,9 @@ static unsigned long __init sme_pgtable_calc(unsigned long len)
 	}
 	pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
 	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
+	pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE;
 
-	total = p4d_size + pud_size + pmd_size;
+	total = p4d_size + pud_size + pmd_size + pte_size;
 
 	/*
 	 * Now calculate the added pagetable structures needed to populate
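Note: the new pte_size term follows directly from the three-phase split above: each mapped range needs at most one page of PTEs for an unaligned head and one for an unaligned tail. With sizeof(pte_t) == 8 and PTRS_PER_PTE == 512 on x86-64, that works out to 2 * 8 * 512 = 8KB of extra pagetable space per range.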
@@ -709,10 +797,13 @@ void __init sme_encrypt_kernel(void)
 
 	/*
 	 * The total workarea includes the executable encryption area and
-	 * the pagetable area.
+	 * the pagetable area. The start of the workarea is already 2MB
+	 * aligned, align the end of the workarea on a 2MB boundary so that
+	 * we don't try to create/allocate PTE entries from the workarea
+	 * before it is mapped.
 	 */
 	workarea_len = execute_len + pgtable_area_len;
-	workarea_end = workarea_start + workarea_len;
+	workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
 
 	/*
 	 * Set the address to the start of where newly created pagetable
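Note: the PTE pages above are allocated out of the workarea itself. If workarea_end were left unaligned, mapping the workarea could require a PTE page carved from a not-yet-mapped portion of that same area; rounding the end up to a 2MB boundary keeps the whole workarea mappable with PMD entries alone. As a made-up example, a 2MB-aligned workarea_start of 0x8000000 plus a workarea_len of 0x2d4000 gives 0x82d4000, which ALIGN(..., PMD_PAGE_SIZE) rounds up to 0x8400000.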
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index de3688461145..23a8a9e411ea 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -104,6 +104,7 @@ ENTRY(__enc_copy)
 	mov	%rdx, %cr4
 
 	push	%r15
+	push	%r12
 
 	movq	%rcx, %r9		/* Save kernel length */
 	movq	%rdi, %r10		/* Save encrypted kernel address */
@@ -119,21 +120,27 @@ ENTRY(__enc_copy)
 
 	wbinvd				/* Invalidate any cache entries */
 
-	/* Copy/encrypt 2MB at a time */
+	/* Copy/encrypt up to 2MB at a time */
+	movq	$PMD_PAGE_SIZE, %r12
 1:
+	cmpq	%r12, %r9
+	jnb	2f
+	movq	%r9, %r12
+
+2:
 	movq	%r11, %rsi		/* Source - decrypted kernel */
 	movq	%r8, %rdi		/* Dest - intermediate copy buffer */
-	movq	$PMD_PAGE_SIZE, %rcx	/* 2MB length */
+	movq	%r12, %rcx
 	rep	movsb
 
 	movq	%r8, %rsi		/* Source - intermediate copy buffer */
 	movq	%r10, %rdi		/* Dest - encrypted kernel */
-	movq	$PMD_PAGE_SIZE, %rcx	/* 2MB length */
+	movq	%r12, %rcx
 	rep	movsb
 
-	addq	$PMD_PAGE_SIZE, %r11
-	addq	$PMD_PAGE_SIZE, %r10
-	subq	$PMD_PAGE_SIZE, %r9	/* Kernel length decrement */
+	addq	%r12, %r11
+	addq	%r12, %r10
+	subq	%r12, %r9		/* Kernel length decrement */
 	jnz	1b			/* Kernel length not zero? */
 
 	/* Restore PAT register */
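Note: the copy loop used to assume the kernel length was an exact multiple of 2MB; with PAGE-aligned encryption that no longer holds, so %r12 carries the per-iteration chunk size and is clamped to the remaining length in %r9 before the final partial chunk. A C-level equivalent of the reworked loop, with invented names, for reference only:

#include <stddef.h>
#include <string.h>

#define PMD_PAGE_SIZE	0x200000UL

/* dst/src are the encrypted/decrypted mappings of the same physical
 * kernel pages; buf is the intermediate 2MB copy buffer */
static void enc_copy(char *dst, const char *src, char *buf, size_t len)
{
	while (len) {
		size_t chunk = len < PMD_PAGE_SIZE ? len : PMD_PAGE_SIZE;

		memcpy(buf, src, chunk);	/* read via decrypted mapping */
		memcpy(dst, buf, chunk);	/* write via encrypted mapping */

		src += chunk;
		dst += chunk;
		len -= chunk;
	}
}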
@@ -142,6 +149,7 @@ ENTRY(__enc_copy)
 	mov	%r15, %rdx		/* Restore original PAT value */
 	wrmsr
 
+	pop	%r12
 	pop	%r15
 
 	ret