diff options
author | Tom Lendacky <thomas.lendacky@amd.com> | 2018-01-10 14:26:05 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2018-01-15 19:50:58 -0500 |
commit | bacf6b499e11760aef73a3bb5ce4e5eea74a3fd4 (patch) | |
tree | 559a67bc48b3fd983df8986a6b54aa875ff49c1d | |
parent | 1303880179e67c59e801429b7e5d0f6b21137d99 (diff) |
x86/mm: Use a struct to reduce parameters for SME PGD mapping
In preparation for follow-on patches, combine the PGD mapping parameters
into a struct to reduce the number of function arguments and allow for
direct updating of the next pagetable mapping area pointer.
Tested-by: Gabriel Craciunescu <nix.or.die@gmail.com>
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20180110192605.6026.96206.stgit@tlendack-t1.amdoffice.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | arch/x86/mm/mem_encrypt.c | 90 |
1 file changed, 46 insertions(+), 44 deletions(-)
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 391b13402e40..5a20696c5440 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c | |||
@@ -464,6 +464,14 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size) | |||
464 | set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT); | 464 | set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT); |
465 | } | 465 | } |
466 | 466 | ||
467 | struct sme_populate_pgd_data { | ||
468 | void *pgtable_area; | ||
469 | pgd_t *pgd; | ||
470 | |||
471 | pmdval_t pmd_val; | ||
472 | unsigned long vaddr; | ||
473 | }; | ||
474 | |||
467 | static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start, | 475 | static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start, |
468 | unsigned long end) | 476 | unsigned long end) |
469 | { | 477 | { |
@@ -486,15 +494,14 @@ static void __init sme_clear_pgd(pgd_t *pgd_base, unsigned long start, | |||
486 | #define PUD_FLAGS _KERNPG_TABLE_NOENC | 494 | #define PUD_FLAGS _KERNPG_TABLE_NOENC |
487 | #define PMD_FLAGS (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL) | 495 | #define PMD_FLAGS (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL) |
488 | 496 | ||
489 | static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area, | 497 | static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) |
490 | unsigned long vaddr, pmdval_t pmd_val) | ||
491 | { | 498 | { |
492 | pgd_t *pgd_p; | 499 | pgd_t *pgd_p; |
493 | p4d_t *p4d_p; | 500 | p4d_t *p4d_p; |
494 | pud_t *pud_p; | 501 | pud_t *pud_p; |
495 | pmd_t *pmd_p; | 502 | pmd_t *pmd_p; |
496 | 503 | ||
497 | pgd_p = pgd_base + pgd_index(vaddr); | 504 | pgd_p = ppd->pgd + pgd_index(ppd->vaddr); |
498 | if (native_pgd_val(*pgd_p)) { | 505 | if (native_pgd_val(*pgd_p)) { |
499 | if (IS_ENABLED(CONFIG_X86_5LEVEL)) | 506 | if (IS_ENABLED(CONFIG_X86_5LEVEL)) |
500 | p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK); | 507 | p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK); |
@@ -504,15 +511,15 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area, | |||
504 | pgd_t pgd; | 511 | pgd_t pgd; |
505 | 512 | ||
506 | if (IS_ENABLED(CONFIG_X86_5LEVEL)) { | 513 | if (IS_ENABLED(CONFIG_X86_5LEVEL)) { |
507 | p4d_p = pgtable_area; | 514 | p4d_p = ppd->pgtable_area; |
508 | memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D); | 515 | memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D); |
509 | pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D; | 516 | ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D; |
510 | 517 | ||
511 | pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS); | 518 | pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS); |
512 | } else { | 519 | } else { |
513 | pud_p = pgtable_area; | 520 | pud_p = ppd->pgtable_area; |
514 | memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); | 521 | memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); |
515 | pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; | 522 | ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; |
516 | 523 | ||
517 | pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS); | 524 | pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS); |
518 | } | 525 | } |
@@ -520,44 +527,41 @@ static void __init *sme_populate_pgd(pgd_t *pgd_base, void *pgtable_area, | |||
520 | } | 527 | } |
521 | 528 | ||
522 | if (IS_ENABLED(CONFIG_X86_5LEVEL)) { | 529 | if (IS_ENABLED(CONFIG_X86_5LEVEL)) { |
523 | p4d_p += p4d_index(vaddr); | 530 | p4d_p += p4d_index(ppd->vaddr); |
524 | if (native_p4d_val(*p4d_p)) { | 531 | if (native_p4d_val(*p4d_p)) { |
525 | pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK); | 532 | pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK); |
526 | } else { | 533 | } else { |
527 | p4d_t p4d; | 534 | p4d_t p4d; |
528 | 535 | ||
529 | pud_p = pgtable_area; | 536 | pud_p = ppd->pgtable_area; |
530 | memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); | 537 | memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); |
531 | pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; | 538 | ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; |
532 | 539 | ||
533 | p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS); | 540 | p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS); |
534 | native_set_p4d(p4d_p, p4d); | 541 | native_set_p4d(p4d_p, p4d); |
535 | } | 542 | } |
536 | } | 543 | } |
537 | 544 | ||
538 | pud_p += pud_index(vaddr); | 545 | pud_p += pud_index(ppd->vaddr); |
539 | if (native_pud_val(*pud_p)) { | 546 | if (native_pud_val(*pud_p)) { |
540 | if (native_pud_val(*pud_p) & _PAGE_PSE) | 547 | if (native_pud_val(*pud_p) & _PAGE_PSE) |
541 | goto out; | 548 | return; |
542 | 549 | ||
543 | pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK); | 550 | pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK); |
544 | } else { | 551 | } else { |
545 | pud_t pud; | 552 | pud_t pud; |
546 | 553 | ||
547 | pmd_p = pgtable_area; | 554 | pmd_p = ppd->pgtable_area; |
548 | memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD); | 555 | memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD); |
549 | pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD; | 556 | ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD; |
550 | 557 | ||
551 | pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS); | 558 | pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS); |
552 | native_set_pud(pud_p, pud); | 559 | native_set_pud(pud_p, pud); |
553 | } | 560 | } |
554 | 561 | ||
555 | pmd_p += pmd_index(vaddr); | 562 | pmd_p += pmd_index(ppd->vaddr); |
556 | if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE)) | 563 | if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE)) |
557 | native_set_pmd(pmd_p, native_make_pmd(pmd_val)); | 564 | native_set_pmd(pmd_p, native_make_pmd(ppd->pmd_val)); |
558 | |||
559 | out: | ||
560 | return pgtable_area; | ||
561 | } | 565 | } |
562 | 566 | ||
563 | static unsigned long __init sme_pgtable_calc(unsigned long len) | 567 | static unsigned long __init sme_pgtable_calc(unsigned long len) |
@@ -615,11 +619,10 @@ void __init sme_encrypt_kernel(void) | |||
615 | unsigned long workarea_start, workarea_end, workarea_len; | 619 | unsigned long workarea_start, workarea_end, workarea_len; |
616 | unsigned long execute_start, execute_end, execute_len; | 620 | unsigned long execute_start, execute_end, execute_len; |
617 | unsigned long kernel_start, kernel_end, kernel_len; | 621 | unsigned long kernel_start, kernel_end, kernel_len; |
622 | struct sme_populate_pgd_data ppd; | ||
618 | unsigned long pgtable_area_len; | 623 | unsigned long pgtable_area_len; |
619 | unsigned long paddr, pmd_flags; | 624 | unsigned long paddr, pmd_flags; |
620 | unsigned long decrypted_base; | 625 | unsigned long decrypted_base; |
621 | void *pgtable_area; | ||
622 | pgd_t *pgd; | ||
623 | 626 | ||
624 | if (!sme_active()) | 627 | if (!sme_active()) |
625 | return; | 628 | return; |
@@ -683,18 +686,18 @@ void __init sme_encrypt_kernel(void) | |||
683 | * pagetables and when the new encrypted and decrypted kernel | 686 | * pagetables and when the new encrypted and decrypted kernel |
684 | * mappings are populated. | 687 | * mappings are populated. |
685 | */ | 688 | */ |
686 | pgtable_area = (void *)execute_end; | 689 | ppd.pgtable_area = (void *)execute_end; |
687 | 690 | ||
688 | /* | 691 | /* |
689 | * Make sure the current pagetable structure has entries for | 692 | * Make sure the current pagetable structure has entries for |
690 | * addressing the workarea. | 693 | * addressing the workarea. |
691 | */ | 694 | */ |
692 | pgd = (pgd_t *)native_read_cr3_pa(); | 695 | ppd.pgd = (pgd_t *)native_read_cr3_pa(); |
693 | paddr = workarea_start; | 696 | paddr = workarea_start; |
694 | while (paddr < workarea_end) { | 697 | while (paddr < workarea_end) { |
695 | pgtable_area = sme_populate_pgd(pgd, pgtable_area, | 698 | ppd.pmd_val = paddr + PMD_FLAGS; |
696 | paddr, | 699 | ppd.vaddr = paddr; |
697 | paddr + PMD_FLAGS); | 700 | sme_populate_pgd_large(&ppd); |
698 | 701 | ||
699 | paddr += PMD_PAGE_SIZE; | 702 | paddr += PMD_PAGE_SIZE; |
700 | } | 703 | } |
@@ -708,17 +711,17 @@ void __init sme_encrypt_kernel(void) | |||
708 | * populated with new PUDs and PMDs as the encrypted and decrypted | 711 | * populated with new PUDs and PMDs as the encrypted and decrypted |
709 | * kernel mappings are created. | 712 | * kernel mappings are created. |
710 | */ | 713 | */ |
711 | pgd = pgtable_area; | 714 | ppd.pgd = ppd.pgtable_area; |
712 | memset(pgd, 0, sizeof(*pgd) * PTRS_PER_PGD); | 715 | memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD); |
713 | pgtable_area += sizeof(*pgd) * PTRS_PER_PGD; | 716 | ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD; |
714 | 717 | ||
715 | /* Add encrypted kernel (identity) mappings */ | 718 | /* Add encrypted kernel (identity) mappings */ |
716 | pmd_flags = PMD_FLAGS | _PAGE_ENC; | 719 | pmd_flags = PMD_FLAGS | _PAGE_ENC; |
717 | paddr = kernel_start; | 720 | paddr = kernel_start; |
718 | while (paddr < kernel_end) { | 721 | while (paddr < kernel_end) { |
719 | pgtable_area = sme_populate_pgd(pgd, pgtable_area, | 722 | ppd.pmd_val = paddr + pmd_flags; |
720 | paddr, | 723 | ppd.vaddr = paddr; |
721 | paddr + pmd_flags); | 724 | sme_populate_pgd_large(&ppd); |
722 | 725 | ||
723 | paddr += PMD_PAGE_SIZE; | 726 | paddr += PMD_PAGE_SIZE; |
724 | } | 727 | } |
@@ -736,9 +739,9 @@ void __init sme_encrypt_kernel(void) | |||
736 | pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT); | 739 | pmd_flags = (PMD_FLAGS & ~_PAGE_CACHE_MASK) | (_PAGE_PAT | _PAGE_PWT); |
737 | paddr = kernel_start; | 740 | paddr = kernel_start; |
738 | while (paddr < kernel_end) { | 741 | while (paddr < kernel_end) { |
739 | pgtable_area = sme_populate_pgd(pgd, pgtable_area, | 742 | ppd.pmd_val = paddr + pmd_flags; |
740 | paddr + decrypted_base, | 743 | ppd.vaddr = paddr + decrypted_base; |
741 | paddr + pmd_flags); | 744 | sme_populate_pgd_large(&ppd); |
742 | 745 | ||
743 | paddr += PMD_PAGE_SIZE; | 746 | paddr += PMD_PAGE_SIZE; |
744 | } | 747 | } |
@@ -746,30 +749,29 @@ void __init sme_encrypt_kernel(void) | |||
746 | /* Add decrypted workarea mappings to both kernel mappings */ | 749 | /* Add decrypted workarea mappings to both kernel mappings */ |
747 | paddr = workarea_start; | 750 | paddr = workarea_start; |
748 | while (paddr < workarea_end) { | 751 | while (paddr < workarea_end) { |
749 | pgtable_area = sme_populate_pgd(pgd, pgtable_area, | 752 | ppd.pmd_val = paddr + PMD_FLAGS; |
750 | paddr, | 753 | ppd.vaddr = paddr; |
751 | paddr + PMD_FLAGS); | 754 | sme_populate_pgd_large(&ppd); |
752 | 755 | ||
753 | pgtable_area = sme_populate_pgd(pgd, pgtable_area, | 756 | ppd.vaddr = paddr + decrypted_base; |
754 | paddr + decrypted_base, | 757 | sme_populate_pgd_large(&ppd); |
755 | paddr + PMD_FLAGS); | ||
756 | 758 | ||
757 | paddr += PMD_PAGE_SIZE; | 759 | paddr += PMD_PAGE_SIZE; |
758 | } | 760 | } |
759 | 761 | ||
760 | /* Perform the encryption */ | 762 | /* Perform the encryption */ |
761 | sme_encrypt_execute(kernel_start, kernel_start + decrypted_base, | 763 | sme_encrypt_execute(kernel_start, kernel_start + decrypted_base, |
762 | kernel_len, workarea_start, (unsigned long)pgd); | 764 | kernel_len, workarea_start, (unsigned long)ppd.pgd); |
763 | 765 | ||
764 | /* | 766 | /* |
765 | * At this point we are running encrypted. Remove the mappings for | 767 | * At this point we are running encrypted. Remove the mappings for |
766 | * the decrypted areas - all that is needed for this is to remove | 768 | * the decrypted areas - all that is needed for this is to remove |
767 | * the PGD entry/entries. | 769 | * the PGD entry/entries. |
768 | */ | 770 | */ |
769 | sme_clear_pgd(pgd, kernel_start + decrypted_base, | 771 | sme_clear_pgd(ppd.pgd, kernel_start + decrypted_base, |
770 | kernel_end + decrypted_base); | 772 | kernel_end + decrypted_base); |
771 | 773 | ||
772 | sme_clear_pgd(pgd, workarea_start + decrypted_base, | 774 | sme_clear_pgd(ppd.pgd, workarea_start + decrypted_base, |
773 | workarea_end + decrypted_base); | 775 | workarea_end + decrypted_base); |
774 | 776 | ||
775 | /* Flush the TLB - no globals so cr3 is enough */ | 777 | /* Flush the TLB - no globals so cr3 is enough */ |