Diffstat (limited to 'arch/x86/mm/mem_encrypt.c')
-rw-r--r--	arch/x86/mm/mem_encrypt.c | 578
1 file changed, 1 insertion, 577 deletions
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 1a53071e2e17..3a1b5fe4c2ca 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -25,17 +25,12 @@
 #include <asm/bootparam.h>
 #include <asm/set_memory.h>
 #include <asm/cacheflush.h>
-#include <asm/sections.h>
 #include <asm/processor-flags.h>
 #include <asm/msr.h>
 #include <asm/cmdline.h>
 
 #include "mm_internal.h"
 
-static char sme_cmdline_arg[] __initdata = "mem_encrypt";
-static char sme_cmdline_on[] __initdata = "on";
-static char sme_cmdline_off[] __initdata = "off";
-
 /*
  * Since SME related variables are set early in the boot process they must
  * reside in the .data section so as not to be zeroed out when the .bss
@@ -46,7 +41,7 @@ EXPORT_SYMBOL(sme_me_mask);
 DEFINE_STATIC_KEY_FALSE(sev_enable_key);
 EXPORT_SYMBOL_GPL(sev_enable_key);
 
-static bool sev_enabled __section(.data);
+bool sev_enabled __section(.data);
 
 /* Buffer used for early in-place encryption by BSP, no locking needed */
 static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE);
@@ -463,574 +458,3 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size)
 	/* Make the SWIOTLB buffer area decrypted */
 	set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT);
 }
-
-struct sme_populate_pgd_data {
-	void *pgtable_area;
-	pgd_t *pgd;
-
-	pmdval_t pmd_flags;
-	pteval_t pte_flags;
-	unsigned long paddr;
-
-	unsigned long vaddr;
-	unsigned long vaddr_end;
-};
-
-static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
-{
-	unsigned long pgd_start, pgd_end, pgd_size;
-	pgd_t *pgd_p;
-
-	pgd_start = ppd->vaddr & PGDIR_MASK;
-	pgd_end = ppd->vaddr_end & PGDIR_MASK;
-
-	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);
-
-	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
-
-	memset(pgd_p, 0, pgd_size);
-}
-
-#define PGD_FLAGS		_KERNPG_TABLE_NOENC
-#define P4D_FLAGS		_KERNPG_TABLE_NOENC
-#define PUD_FLAGS		_KERNPG_TABLE_NOENC
-#define PMD_FLAGS		_KERNPG_TABLE_NOENC
-
-#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
-
-#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
-#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
-				 (_PAGE_PAT | _PAGE_PWT))
-
-#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)
-
-#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
-
-#define PTE_FLAGS_DEC		PTE_FLAGS
-#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
-				 (_PAGE_PAT | _PAGE_PWT))
-
-#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)
-
-static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
-{
-	pgd_t *pgd_p;
-	p4d_t *p4d_p;
-	pud_t *pud_p;
-	pmd_t *pmd_p;
-
-	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
-	if (native_pgd_val(*pgd_p)) {
-		if (IS_ENABLED(CONFIG_X86_5LEVEL))
-			p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
-		else
-			pud_p = (pud_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK);
-	} else {
-		pgd_t pgd;
-
-		if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-			p4d_p = ppd->pgtable_area;
-			memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
-			ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D;
-
-			pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS);
-		} else {
-			pud_p = ppd->pgtable_area;
-			memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
-			ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
-
-			pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS);
-		}
-		native_set_pgd(pgd_p, pgd);
-	}
-
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-		p4d_p += p4d_index(ppd->vaddr);
-		if (native_p4d_val(*p4d_p)) {
-			pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK);
-		} else {
-			p4d_t p4d;
-
-			pud_p = ppd->pgtable_area;
-			memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
-			ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD;
-
-			p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS);
-			native_set_p4d(p4d_p, p4d);
-		}
-	}
-
-	pud_p += pud_index(ppd->vaddr);
-	if (native_pud_val(*pud_p)) {
-		if (native_pud_val(*pud_p) & _PAGE_PSE)
-			return NULL;
-
-		pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK);
-	} else {
-		pud_t pud;
-
-		pmd_p = ppd->pgtable_area;
-		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
-		ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD;
-
-		pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS);
-		native_set_pud(pud_p, pud);
-	}
-
-	return pmd_p;
-}
-
-static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
-{
-	pmd_t *pmd_p;
-
-	pmd_p = sme_prepare_pgd(ppd);
-	if (!pmd_p)
-		return;
-
-	pmd_p += pmd_index(ppd->vaddr);
-	if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE))
-		native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags));
-}
-
-static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
-{
-	pmd_t *pmd_p;
-	pte_t *pte_p;
-
-	pmd_p = sme_prepare_pgd(ppd);
-	if (!pmd_p)
-		return;
-
-	pmd_p += pmd_index(ppd->vaddr);
-	if (native_pmd_val(*pmd_p)) {
-		if (native_pmd_val(*pmd_p) & _PAGE_PSE)
-			return;
-
-		pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK);
-	} else {
-		pmd_t pmd;
-
-		pte_p = ppd->pgtable_area;
-		memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE);
-		ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE;
-
-		pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS);
-		native_set_pmd(pmd_p, pmd);
-	}
-
-	pte_p += pte_index(ppd->vaddr);
-	if (!native_pte_val(*pte_p))
-		native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags));
-}
-
-static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
-{
-	while (ppd->vaddr < ppd->vaddr_end) {
-		sme_populate_pgd_large(ppd);
-
-		ppd->vaddr += PMD_PAGE_SIZE;
-		ppd->paddr += PMD_PAGE_SIZE;
-	}
-}
-
-static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
-{
-	while (ppd->vaddr < ppd->vaddr_end) {
-		sme_populate_pgd(ppd);
-
-		ppd->vaddr += PAGE_SIZE;
-		ppd->paddr += PAGE_SIZE;
-	}
-}
-
-static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
-				   pmdval_t pmd_flags, pteval_t pte_flags)
-{
-	unsigned long vaddr_end;
-
-	ppd->pmd_flags = pmd_flags;
-	ppd->pte_flags = pte_flags;
-
-	/* Save original end value since we modify the struct value */
-	vaddr_end = ppd->vaddr_end;
-
-	/* If start is not 2MB aligned, create PTE entries */
-	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
-	__sme_map_range_pte(ppd);
-
-	/* Create PMD entries */
-	ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
-	__sme_map_range_pmd(ppd);
-
-	/* If end is not 2MB aligned, create PTE entries */
-	ppd->vaddr_end = vaddr_end;
-	__sme_map_range_pte(ppd);
-}
-
-static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
-{
-	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
-}
-
-static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
-{
-	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
-}
-
-static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
-{
-	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
-}
-
-static unsigned long __init sme_pgtable_calc(unsigned long len)
-{
-	unsigned long p4d_size, pud_size, pmd_size, pte_size;
-	unsigned long total;
-
-	/*
-	 * Perform a relatively simplistic calculation of the pagetable
-	 * entries that are needed. Those mappings will be covered mostly
-	 * by 2MB PMD entries so we can conservatively calculate the required
-	 * number of P4D, PUD and PMD structures needed to perform the
-	 * mappings.  For mappings that are not 2MB aligned, PTE mappings
-	 * would be needed for the start and end portion of the address range
-	 * that fall outside of the 2MB alignment.  This results in, at most,
-	 * two extra pages to hold PTE entries for each range that is mapped.
-	 * Incrementing the count for each covers the case where the addresses
-	 * cross entries.
-	 */
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-		p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
-		p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D;
-		pud_size = (ALIGN(len, P4D_SIZE) / P4D_SIZE) + 1;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	} else {
-		p4d_size = 0;
-		pud_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	}
-	pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1;
-	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
-	pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE;
-
-	total = p4d_size + pud_size + pmd_size + pte_size;
-
-	/*
-	 * Now calculate the added pagetable structures needed to populate
-	 * the new pagetables.
-	 */
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-		p4d_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE;
-		p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D;
-		pud_size = ALIGN(total, P4D_SIZE) / P4D_SIZE;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	} else {
-		p4d_size = 0;
-		pud_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE;
-		pud_size *= sizeof(pud_t) * PTRS_PER_PUD;
-	}
-	pmd_size = ALIGN(total, PUD_SIZE) / PUD_SIZE;
-	pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD;
-
-	total += p4d_size + pud_size + pmd_size;
-
-	return total;
-}
-
-void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp)
-{
-	unsigned long workarea_start, workarea_end, workarea_len;
-	unsigned long execute_start, execute_end, execute_len;
-	unsigned long kernel_start, kernel_end, kernel_len;
-	unsigned long initrd_start, initrd_end, initrd_len;
-	struct sme_populate_pgd_data ppd;
-	unsigned long pgtable_area_len;
-	unsigned long decrypted_base;
-
-	if (!sme_active())
-		return;
-
-	/*
-	 * Prepare for encrypting the kernel and initrd by building new
-	 * pagetables with the necessary attributes needed to encrypt the
-	 * kernel in place.
-	 *
-	 * One range of virtual addresses will map the memory occupied
-	 * by the kernel and initrd as encrypted.
-	 *
-	 * Another range of virtual addresses will map the memory occupied
-	 * by the kernel and initrd as decrypted and write-protected.
-	 *
-	 * The use of write-protect attribute will prevent any of the
-	 * memory from being cached.
-	 */
-
-	/* Physical addresses gives us the identity mapped virtual addresses */
-	kernel_start = __pa_symbol(_text);
-	kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
-	kernel_len = kernel_end - kernel_start;
-
-	initrd_start = 0;
-	initrd_end = 0;
-	initrd_len = 0;
-#ifdef CONFIG_BLK_DEV_INITRD
-	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
-		     ((unsigned long)bp->ext_ramdisk_size << 32);
-	if (initrd_len) {
-		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
-			       ((unsigned long)bp->ext_ramdisk_image << 32);
-		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
-		initrd_len = initrd_end - initrd_start;
-	}
-#endif
-
-	/* Set the encryption workarea to be immediately after the kernel */
-	workarea_start = kernel_end;
-
-	/*
-	 * Calculate required number of workarea bytes needed:
-	 *   executable encryption area size:
-	 *     stack page (PAGE_SIZE)
-	 *     encryption routine page (PAGE_SIZE)
-	 *     intermediate copy buffer (PMD_PAGE_SIZE)
-	 *   pagetable structures for the encryption of the kernel
-	 *   pagetable structures for workarea (in case not currently mapped)
-	 */
-	execute_start = workarea_start;
-	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
-	execute_len = execute_end - execute_start;
-
-	/*
-	 * One PGD for both encrypted and decrypted mappings and a set of
-	 * PUDs and PMDs for each of the encrypted and decrypted mappings.
-	 */
-	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
-	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
-	if (initrd_len)
-		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;
-
-	/* PUDs and PMDs needed in the current pagetables for the workarea */
-	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);
-
-	/*
-	 * The total workarea includes the executable encryption area and
-	 * the pagetable area. The start of the workarea is already 2MB
-	 * aligned, align the end of the workarea on a 2MB boundary so that
-	 * we don't try to create/allocate PTE entries from the workarea
-	 * before it is mapped.
-	 */
-	workarea_len = execute_len + pgtable_area_len;
-	workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);
-
-	/*
-	 * Set the address to the start of where newly created pagetable
-	 * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
-	 * structures are created when the workarea is added to the current
-	 * pagetables and when the new encrypted and decrypted kernel
-	 * mappings are populated.
-	 */
-	ppd.pgtable_area = (void *)execute_end;
-
-	/*
-	 * Make sure the current pagetable structure has entries for
-	 * addressing the workarea.
-	 */
-	ppd.pgd = (pgd_t *)native_read_cr3_pa();
-	ppd.paddr = workarea_start;
-	ppd.vaddr = workarea_start;
-	ppd.vaddr_end = workarea_end;
-	sme_map_range_decrypted(&ppd);
-
-	/* Flush the TLB - no globals so cr3 is enough */
-	native_write_cr3(__native_read_cr3());
-
-	/*
-	 * A new pagetable structure is being built to allow for the kernel
-	 * and initrd to be encrypted. It starts with an empty PGD that will
-	 * then be populated with new PUDs and PMDs as the encrypted and
-	 * decrypted kernel mappings are created.
-	 */
-	ppd.pgd = ppd.pgtable_area;
-	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
-	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;
-
-	/*
-	 * A different PGD index/entry must be used to get different
-	 * pagetable entries for the decrypted mapping. Choose the next
-	 * PGD index and convert it to a virtual address to be used as
-	 * the base of the mapping.
-	 */
-	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
-	if (initrd_len) {
-		unsigned long check_base;
-
-		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
-		decrypted_base = max(decrypted_base, check_base);
-	}
-	decrypted_base <<= PGDIR_SHIFT;
-
-	/* Add encrypted kernel (identity) mappings */
-	ppd.paddr = kernel_start;
-	ppd.vaddr = kernel_start;
-	ppd.vaddr_end = kernel_end;
-	sme_map_range_encrypted(&ppd);
-
-	/* Add decrypted, write-protected kernel (non-identity) mappings */
-	ppd.paddr = kernel_start;
-	ppd.vaddr = kernel_start + decrypted_base;
-	ppd.vaddr_end = kernel_end + decrypted_base;
-	sme_map_range_decrypted_wp(&ppd);
-
-	if (initrd_len) {
-		/* Add encrypted initrd (identity) mappings */
-		ppd.paddr = initrd_start;
-		ppd.vaddr = initrd_start;
-		ppd.vaddr_end = initrd_end;
-		sme_map_range_encrypted(&ppd);
-		/*
-		 * Add decrypted, write-protected initrd (non-identity) mappings
-		 */
-		ppd.paddr = initrd_start;
-		ppd.vaddr = initrd_start + decrypted_base;
-		ppd.vaddr_end = initrd_end + decrypted_base;
-		sme_map_range_decrypted_wp(&ppd);
-	}
-
-	/* Add decrypted workarea mappings to both kernel mappings */
-	ppd.paddr = workarea_start;
-	ppd.vaddr = workarea_start;
-	ppd.vaddr_end = workarea_end;
-	sme_map_range_decrypted(&ppd);
-
-	ppd.paddr = workarea_start;
-	ppd.vaddr = workarea_start + decrypted_base;
-	ppd.vaddr_end = workarea_end + decrypted_base;
-	sme_map_range_decrypted(&ppd);
-
-	/* Perform the encryption */
-	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
-			    kernel_len, workarea_start, (unsigned long)ppd.pgd);
-
-	if (initrd_len)
-		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
-				    initrd_len, workarea_start,
-				    (unsigned long)ppd.pgd);
-
-	/*
-	 * At this point we are running encrypted.  Remove the mappings for
-	 * the decrypted areas - all that is needed for this is to remove
-	 * the PGD entry/entries.
-	 */
-	ppd.vaddr = kernel_start + decrypted_base;
-	ppd.vaddr_end = kernel_end + decrypted_base;
-	sme_clear_pgd(&ppd);
-
-	if (initrd_len) {
-		ppd.vaddr = initrd_start + decrypted_base;
-		ppd.vaddr_end = initrd_end + decrypted_base;
-		sme_clear_pgd(&ppd);
-	}
-
-	ppd.vaddr = workarea_start + decrypted_base;
-	ppd.vaddr_end = workarea_end + decrypted_base;
-	sme_clear_pgd(&ppd);
-
-	/* Flush the TLB - no globals so cr3 is enough */
-	native_write_cr3(__native_read_cr3());
-}
-
-void __init __nostackprotector sme_enable(struct boot_params *bp)
-{
-	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
-	unsigned int eax, ebx, ecx, edx;
-	unsigned long feature_mask;
-	bool active_by_default;
-	unsigned long me_mask;
-	char buffer[16];
-	u64 msr;
-
-	/* Check for the SME/SEV support leaf */
-	eax = 0x80000000;
-	ecx = 0;
-	native_cpuid(&eax, &ebx, &ecx, &edx);
-	if (eax < 0x8000001f)
-		return;
-
-#define AMD_SME_BIT	BIT(0)
-#define AMD_SEV_BIT	BIT(1)
-	/*
-	 * Set the feature mask (SME or SEV) based on whether we are
-	 * running under a hypervisor.
-	 */
-	eax = 1;
-	ecx = 0;
-	native_cpuid(&eax, &ebx, &ecx, &edx);
-	feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;
-
-	/*
-	 * Check for the SME/SEV feature:
-	 *   CPUID Fn8000_001F[EAX]
-	 *   - Bit 0 - Secure Memory Encryption support
-	 *   - Bit 1 - Secure Encrypted Virtualization support
-	 *   CPUID Fn8000_001F[EBX]
-	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
-	 */
-	eax = 0x8000001f;
-	ecx = 0;
-	native_cpuid(&eax, &ebx, &ecx, &edx);
-	if (!(eax & feature_mask))
-		return;
-
-	me_mask = 1UL << (ebx & 0x3f);
-
-	/* Check if memory encryption is enabled */
-	if (feature_mask == AMD_SME_BIT) {
-		/* For SME, check the SYSCFG MSR */
-		msr = __rdmsr(MSR_K8_SYSCFG);
-		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
-			return;
-	} else {
-		/* For SEV, check the SEV MSR */
-		msr = __rdmsr(MSR_AMD64_SEV);
-		if (!(msr & MSR_AMD64_SEV_ENABLED))
-			return;
-
-		/* SEV state cannot be controlled by a command line option */
-		sme_me_mask = me_mask;
-		sev_enabled = true;
-		return;
-	}
-
-	/*
-	 * Fixups have not been applied to phys_base yet and we're running
-	 * identity mapped, so we must obtain the address to the SME command
-	 * line argument data using rip-relative addressing.
-	 */
-	asm ("lea sme_cmdline_arg(%%rip), %0"
-	     : "=r" (cmdline_arg)
-	     : "p" (sme_cmdline_arg));
-	asm ("lea sme_cmdline_on(%%rip), %0"
-	     : "=r" (cmdline_on)
-	     : "p" (sme_cmdline_on));
-	asm ("lea sme_cmdline_off(%%rip), %0"
-	     : "=r" (cmdline_off)
-	     : "p" (sme_cmdline_off));
-
-	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
-		active_by_default = true;
-	else
-		active_by_default = false;
-
-	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
-				     ((u64)bp->ext_cmd_line_ptr << 32));
-
-	cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));
-
-	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
-		sme_me_mask = me_mask;
-	else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
-		sme_me_mask = 0;
-	else
-		sme_me_mask = active_by_default ? me_mask : 0;
-}