author      Ingo Molnar <mingo@kernel.org>   2013-11-26 06:23:04 -0500
committer   Ingo Molnar <mingo@kernel.org>   2013-11-26 06:23:04 -0500
commit      61d066977583803d333f1e7266b8ba772162dda4 (patch)
tree        087d56e401422f1a8a165a782216aa6d0291a60e /arch/x86/platform
parent      b975dc3689fc6a3718ad288ce080924f9cb7e176 (diff)
parent      ee41143027706d9f342dfe05487a00b20887fde7 (diff)
Merge tag 'efi-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi into x86/efi
Pull EFI virtual mapping changes from Matt Fleming:
* New static EFI runtime services virtual mapping layout which is
groundwork for kexec support on EFI. (Borislav Petkov)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/platform')
 arch/x86/platform/efi/efi.c         | 111
 arch/x86/platform/efi/efi_32.c      |   9
 arch/x86/platform/efi/efi_64.c      | 109
 arch/x86/platform/efi/efi_stub_64.S |  54
 4 files changed, 248 insertions(+), 35 deletions(-)
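
The pull description above is brief, so here is a stand-alone sketch of the VA allocation scheme it refers to, as implemented by the new efi_map_region() in efi_64.c further down. This is plain user-space C, not kernel code; the helper name alloc_efi_va() and all addresses and sizes in main() are made up for illustration. The idea: runtime regions get their virtual addresses handed out top-down from -4 GiB inside a 64 GiB window ending at -68 GiB, and a region whose physical address is not 2 MiB-aligned keeps its offset within its 2 MiB page.

/* Illustrative sketch: models the top-down EFI VA allocation arithmetic. */
#include <stdio.h>
#include <stdint.h>

#define PMD_SIZE     (1ULL << 21)             /* 2 MiB */
#define EFI_VA_START (0ULL - (4ULL << 30))    /* -4 GiB:  0xffffffff00000000 */
#define EFI_VA_END   (0ULL - (68ULL << 30))   /* -68 GiB: bottom of the 64 GiB window */

static uint64_t efi_va = EFI_VA_START;

/* Hand out the next VA slot for a region of 'size' bytes at physical address 'pa'. */
static uint64_t alloc_efi_va(uint64_t pa, uint64_t size)
{
        efi_va -= size;

        if (!(pa & (PMD_SIZE - 1))) {
                /* PA is 2 MiB-aligned: simply round the VA down to 2 MiB. */
                efi_va &= ~(PMD_SIZE - 1);
        } else {
                /* Keep the same offset within the 2 MiB page as the PA has. */
                uint64_t pa_offset = pa & (PMD_SIZE - 1);
                uint64_t prev_va   = efi_va;

                efi_va = (efi_va & ~(PMD_SIZE - 1)) + pa_offset;
                if (efi_va > prev_va)   /* never move upward into the previous slot */
                        efi_va -= PMD_SIZE;
        }

        /* Unsigned compare is fine: both values sit at the very top of the address space. */
        if (efi_va < EFI_VA_END) {
                fprintf(stderr, "VA address range overflow!\n");
                return 0;
        }
        return efi_va;
}

int main(void)
{
        /* Two made-up runtime regions: one 2 MiB-aligned, one not. */
        printf("region A -> VA 0x%llx\n",
               (unsigned long long)alloc_efi_va(0x7f000000ULL, 0x100000ULL));
        printf("region B -> VA 0x%llx\n",
               (unsigned long long)alloc_efi_va(0x7f251000ULL, 0x3000ULL));
        return 0;
}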
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 92c02344a060..f8ec4dafc74e 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -12,6 +12,8 @@
  * Bibo Mao <bibo.mao@intel.com>
  * Chandramouli Narayanan <mouli@linux.intel.com>
  * Huang Ying <ying.huang@intel.com>
+ * Copyright (C) 2013 SuSE Labs
+ * Borislav Petkov <bp@suse.de> - runtime services VA mapping
  *
  * Copied from efi_32.c to eliminate the duplicated code between EFI
  * 32/64 support code. --ying 2007-10-26
@@ -51,7 +53,7 @@
 #include <asm/x86_init.h>
 #include <asm/rtc.h>
 
-#define EFI_DEBUG 1
+#define EFI_DEBUG
 
 #define EFI_MIN_RESERVE 5120
 
@@ -398,9 +400,9 @@ int __init efi_memblock_x86_reserve_range(void)
         return 0;
 }
 
-#if EFI_DEBUG
 static void __init print_efi_memmap(void)
 {
+#ifdef EFI_DEBUG
         efi_memory_desc_t *md;
         void *p;
         int i;
@@ -415,8 +417,8 @@ static void __init print_efi_memmap(void)
                         md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
                         (md->num_pages >> (20 - EFI_PAGE_SHIFT)));
         }
-}
 #endif  /*  EFI_DEBUG  */
+}
 
 void __init efi_reserve_boot_services(void)
 {
@@ -696,10 +698,7 @@ void __init efi_init(void)
                 x86_platform.set_wallclock = efi_set_rtc_mmss;
         }
 #endif
-
-#if EFI_DEBUG
         print_efi_memmap();
-#endif
 }
 
 void __init efi_late_init(void)
@@ -748,21 +747,56 @@ void efi_memory_uc(u64 addr, unsigned long size)
         set_memory_uc(addr, npages);
 }
 
+void __init old_map_region(efi_memory_desc_t *md)
+{
+        u64 start_pfn, end_pfn, end;
+        unsigned long size;
+        void *va;
+
+        start_pfn = PFN_DOWN(md->phys_addr);
+        size      = md->num_pages << PAGE_SHIFT;
+        end       = md->phys_addr + size;
+        end_pfn   = PFN_UP(end);
+
+        if (pfn_range_is_mapped(start_pfn, end_pfn)) {
+                va = __va(md->phys_addr);
+
+                if (!(md->attribute & EFI_MEMORY_WB))
+                        efi_memory_uc((u64)(unsigned long)va, size);
+        } else
+                va = efi_ioremap(md->phys_addr, size,
+                                 md->type, md->attribute);
+
+        md->virt_addr = (u64) (unsigned long) va;
+        if (!va)
+                pr_err("ioremap of 0x%llX failed!\n",
+                       (unsigned long long)md->phys_addr);
+}
+
 /*
  * This function will switch the EFI runtime services to virtual mode.
- * Essentially, look through the EFI memmap and map every region that
- * has the runtime attribute bit set in its memory descriptor and update
- * that memory descriptor with the virtual address obtained from ioremap().
- * This enables the runtime services to be called without having to
+ * Essentially, we look through the EFI memmap and map every region that
+ * has the runtime attribute bit set in its memory descriptor into the
+ * ->trampoline_pgd page table using a top-down VA allocation scheme.
+ *
+ * The old method which used to update that memory descriptor with the
+ * virtual address obtained from ioremap() is still supported when the
+ * kernel is booted with efi=old_map on its command line. Same old
+ * method enabled the runtime services to be called without having to
  * thunk back into physical mode for every invocation.
+ *
+ * The new method does a pagetable switch in a preemption-safe manner
+ * so that we're in a different address space when calling a runtime
+ * function. For function arguments passing we do copy the PGDs of the
+ * kernel page table into ->trampoline_pgd prior to each call.
  */
 void __init efi_enter_virtual_mode(void)
 {
         efi_memory_desc_t *md, *prev_md = NULL;
-        efi_status_t status;
+        void *p, *new_memmap = NULL;
         unsigned long size;
-        u64 end, systab, start_pfn, end_pfn;
-        void *p, *va, *new_memmap = NULL;
+        efi_status_t status;
+        u64 end, systab;
         int count = 0;
 
         efi.systab = NULL;
@@ -771,7 +805,6 @@ void __init efi_enter_virtual_mode(void)
          * We don't do virtual mode, since we don't do runtime services, on
          * non-native EFI
          */
-
         if (!efi_is_native()) {
                 efi_unmap_memmap();
                 return;
@@ -802,6 +835,7 @@ void __init efi_enter_virtual_mode(void)
                         continue;
                 }
                 prev_md = md;
+
         }
 
         for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
@@ -814,36 +848,24 @@ void __init efi_enter_virtual_mode(void)
                         continue;
                 }
 
+                efi_map_region(md);
+
                 size = md->num_pages << EFI_PAGE_SHIFT;
                 end = md->phys_addr + size;
 
-                start_pfn = PFN_DOWN(md->phys_addr);
-                end_pfn = PFN_UP(end);
-                if (pfn_range_is_mapped(start_pfn, end_pfn)) {
-                        va = __va(md->phys_addr);
-
-                        if (!(md->attribute & EFI_MEMORY_WB))
-                                efi_memory_uc((u64)(unsigned long)va, size);
-                } else
-                        va = efi_ioremap(md->phys_addr, size,
-                                         md->type, md->attribute);
-
-                md->virt_addr = (u64) (unsigned long) va;
-
-                if (!va) {
-                        pr_err("ioremap of 0x%llX failed!\n",
-                               (unsigned long long)md->phys_addr);
-                        continue;
-                }
-
                 systab = (u64) (unsigned long) efi_phys.systab;
                 if (md->phys_addr <= systab && systab < end) {
                         systab += md->virt_addr - md->phys_addr;
+
                         efi.systab = (efi_system_table_t *) (unsigned long) systab;
                 }
+
                 new_memmap = krealloc(new_memmap,
                                       (count + 1) * memmap.desc_size,
                                       GFP_KERNEL);
+                if (!new_memmap)
+                        goto err_out;
+
                 memcpy(new_memmap + (count * memmap.desc_size), md,
                        memmap.desc_size);
                 count++;
@@ -851,6 +873,9 @@ void __init efi_enter_virtual_mode(void)
 
         BUG_ON(!efi.systab);
 
+        efi_setup_page_tables();
+        efi_sync_low_kernel_mappings();
+
         status = phys_efi_set_virtual_address_map(
                 memmap.desc_size * count,
                 memmap.desc_size,
@@ -883,7 +908,8 @@ void __init efi_enter_virtual_mode(void)
         efi.query_variable_info = virt_efi_query_variable_info;
         efi.update_capsule = virt_efi_update_capsule;
         efi.query_capsule_caps = virt_efi_query_capsule_caps;
-        if (__supported_pte_mask & _PAGE_NX)
+
+        if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
                 runtime_code_page_mkexec();
 
         kfree(new_memmap);
@@ -894,6 +920,11 @@ void __init efi_enter_virtual_mode(void)
                          EFI_VARIABLE_BOOTSERVICE_ACCESS |
                          EFI_VARIABLE_RUNTIME_ACCESS,
                          0, NULL);
+
+        return;
+
+err_out:
+        pr_err("Error reallocating memory, EFI runtime non-functional!\n");
 }
 
 /*
@@ -1013,3 +1044,15 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
         return EFI_SUCCESS;
 }
 EXPORT_SYMBOL_GPL(efi_query_variable_store);
+
+static int __init parse_efi_cmdline(char *str)
+{
+        if (*str == '=')
+                str++;
+
+        if (!strncmp(str, "old_map", 7))
+                set_bit(EFI_OLD_MEMMAP, &x86_efi_facility);
+
+        return 0;
+}
+early_param("efi", parse_efi_cmdline);
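
For reference, the early_param() handler added at the end of efi.c above is what makes booting with efi=old_map fall back to the old ioremap()-based mapping. Below is a stand-alone sketch of just the string handling; it is not kernel code, so the EFI_OLD_MEMMAP facility bit and set_bit() are replaced by a plain flag.

/* Sketch of the efi= option parsing, runnable in user space. */
#include <stdio.h>
#include <string.h>

static int efi_old_memmap;      /* stands in for the EFI_OLD_MEMMAP facility bit */

static int parse_efi_cmdline(char *str)
{
        if (*str == '=')        /* tolerate a leading '=' in the argument */
                str++;

        if (!strncmp(str, "old_map", 7))
                efi_old_memmap = 1;

        return 0;
}

int main(void)
{
        char with_eq[] = "=old_map", bare[] = "old_map", other[] = "debug";

        parse_efi_cmdline(with_eq);
        printf("after \"=old_map\": %d\n", efi_old_memmap);

        efi_old_memmap = 0;
        parse_efi_cmdline(bare);
        printf("after \"old_map\":  %d\n", efi_old_memmap);

        efi_old_memmap = 0;
        parse_efi_cmdline(other);
        printf("after \"debug\":    %d\n", efi_old_memmap);

        return 0;
}

Note that because the match uses strncmp() with length 7, any argument that merely starts with "old_map" is also accepted.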
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 40e446941dd7..e94557cf5487 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -37,9 +37,16 @@
  * claim EFI runtime service handler exclusively and to duplicate a memory in
  * low memory space say 0 - 3G.
  */
-
 static unsigned long efi_rt_eflags;
 
+void efi_sync_low_kernel_mappings(void) {}
+void efi_setup_page_tables(void) {}
+
+void __init efi_map_region(efi_memory_desc_t *md)
+{
+        old_map_region(md);
+}
+
 void efi_call_phys_prelog(void)
 {
         struct desc_ptr gdt_descr;
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 39a0e7f1f0a3..bf286c386d33 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -38,10 +38,28 @@
 #include <asm/efi.h>
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
+#include <asm/realmode.h>
 
 static pgd_t *save_pgd __initdata;
 static unsigned long efi_flags __initdata;
 
+/*
+ * We allocate runtime services regions bottom-up, starting from -4G, i.e.
+ * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
+ */
+static u64 efi_va = -4 * (1UL << 30);
+#define EFI_VA_END (-68 * (1UL << 30))
+
+/*
+ * Scratch space used for switching the pagetable in the EFI stub
+ */
+struct efi_scratch {
+        u64 r15;
+        u64 prev_cr3;
+        pgd_t *efi_pgt;
+        bool use_pgd;
+};
+
 static void __init early_code_mapping_set_exec(int executable)
 {
         efi_memory_desc_t *md;
@@ -65,6 +83,9 @@ void __init efi_call_phys_prelog(void)
         int pgd;
         int n_pgds;
 
+        if (!efi_enabled(EFI_OLD_MEMMAP))
+                return;
+
         early_code_mapping_set_exec(1);
         local_irq_save(efi_flags);
 
@@ -86,6 +107,10 @@ void __init efi_call_phys_epilog(void)
          */
         int pgd;
         int n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
+
+        if (!efi_enabled(EFI_OLD_MEMMAP))
+                return;
+
         for (pgd = 0; pgd < n_pgds; pgd++)
                 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
         kfree(save_pgd);
@@ -94,6 +119,90 @@ void __init efi_call_phys_epilog(void)
         early_code_mapping_set_exec(0);
 }
 
+/*
+ * Add low kernel mappings for passing arguments to EFI functions.
+ */
+void efi_sync_low_kernel_mappings(void)
+{
+        unsigned num_pgds;
+        pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+
+        if (efi_enabled(EFI_OLD_MEMMAP))
+                return;
+
+        num_pgds = pgd_index(MODULES_END - 1) - pgd_index(PAGE_OFFSET);
+
+        memcpy(pgd + pgd_index(PAGE_OFFSET),
+                init_mm.pgd + pgd_index(PAGE_OFFSET),
+                sizeof(pgd_t) * num_pgds);
+}
+
+void efi_setup_page_tables(void)
+{
+        efi_scratch.efi_pgt = (pgd_t *)(unsigned long)real_mode_header->trampoline_pgd;
+
+        if (!efi_enabled(EFI_OLD_MEMMAP))
+                efi_scratch.use_pgd = true;
+}
+
+static void __init __map_region(efi_memory_desc_t *md, u64 va)
+{
+        pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
+        unsigned long pf = 0, size;
+        u64 end;
+
+        if (!(md->attribute & EFI_MEMORY_WB))
+                pf |= _PAGE_PCD;
+
+        size = md->num_pages << PAGE_SHIFT;
+        end  = va + size;
+
+        if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf))
+                pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
+                        md->phys_addr, va);
+}
+
+void __init efi_map_region(efi_memory_desc_t *md)
+{
+        unsigned long size = md->num_pages << PAGE_SHIFT;
+        u64 pa = md->phys_addr;
+
+        if (efi_enabled(EFI_OLD_MEMMAP))
+                return old_map_region(md);
+
+        /*
+         * Make sure the 1:1 mappings are present as a catch-all for b0rked
+         * firmware which doesn't update all internal pointers after switching
+         * to virtual mode and would otherwise crap on us.
+         */
+        __map_region(md, md->phys_addr);
+
+        efi_va -= size;
+
+        /* Is PA 2M-aligned? */
+        if (!(pa & (PMD_SIZE - 1))) {
+                efi_va &= PMD_MASK;
+        } else {
+                u64 pa_offset = pa & (PMD_SIZE - 1);
+                u64 prev_va = efi_va;
+
+                /* get us the same offset within this 2M page */
+                efi_va = (efi_va & PMD_MASK) + pa_offset;
+
+                if (efi_va > prev_va)
+                        efi_va -= PMD_SIZE;
+        }
+
+        if (efi_va < EFI_VA_END) {
+                pr_warn(FW_WARN "VA address range overflow!\n");
+                return;
+        }
+
+        /* Do the VA map */
+        __map_region(md, efi_va);
+        md->virt_addr = efi_va;
+}
+
 void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
                                  u32 type, u64 attribute)
 {
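
One detail worth noting before the assembly below: SWITCH_PGT and RESTORE_PGT in efi_stub_64.S address efi_scratch by raw byte offsets (+0, +8, +16, +24), which only works because of the field layout of struct efi_scratch defined in efi_64.c above. The following stand-alone sketch (user-space C, with pgd_t * replaced by void *, assuming the usual x86-64 ABI layout) makes that correspondence explicit:

/* Layout check for the efi_scratch offsets hard-coded in the assembly. */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct efi_scratch {
        uint64_t r15;           /* efi_scratch+0:  caller's %r15                  */
        uint64_t prev_cr3;      /* efi_scratch+8:  %cr3 to restore after the call */
        void *efi_pgt;          /* efi_scratch+16: page table to load into %cr3   */
        bool use_pgd;           /* efi_scratch+24: flag tested by SWITCH_PGT      */
};

static_assert(offsetof(struct efi_scratch, r15)      == 0,  "r15 at +0");
static_assert(offsetof(struct efi_scratch, prev_cr3) == 8,  "prev_cr3 at +8");
static_assert(offsetof(struct efi_scratch, efi_pgt)  == 16, "efi_pgt at +16");
static_assert(offsetof(struct efi_scratch, use_pgd)  == 24, "use_pgd at +24");

int main(void) { return 0; }

The 25 bytes reserved for efi_scratch at the end of efi_stub_64.S (".fill 3,8,0" plus ".byte 0") match this layout: three 8-byte fields followed by the one-byte use_pgd flag.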
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 4c07ccab8146..88073b140298 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -34,10 +34,47 @@
         mov %rsi, %cr0;                 \
         mov (%rsp), %rsp
 
+        /* stolen from gcc */
+        .macro FLUSH_TLB_ALL
+        movq %r15, efi_scratch(%rip)
+        movq %r14, efi_scratch+8(%rip)
+        movq %cr4, %r15
+        movq %r15, %r14
+        andb $0x7f, %r14b
+        movq %r14, %cr4
+        movq %r15, %cr4
+        movq efi_scratch+8(%rip), %r14
+        movq efi_scratch(%rip), %r15
+        .endm
+
+        .macro SWITCH_PGT
+        cmpb $0, efi_scratch+24(%rip)
+        je 1f
+        movq %r15, efi_scratch(%rip)            # r15
+        # save previous CR3
+        movq %cr3, %r15
+        movq %r15, efi_scratch+8(%rip)          # prev_cr3
+        movq efi_scratch+16(%rip), %r15         # EFI pgt
+        movq %r15, %cr3
+        1:
+        .endm
+
+        .macro RESTORE_PGT
+        cmpb $0, efi_scratch+24(%rip)
+        je 2f
+        movq efi_scratch+8(%rip), %r15
+        movq %r15, %cr3
+        movq efi_scratch(%rip), %r15
+        FLUSH_TLB_ALL
+        2:
+        .endm
+
 ENTRY(efi_call0)
         SAVE_XMM
         subq $32, %rsp
+        SWITCH_PGT
         call *%rdi
+        RESTORE_PGT
         addq $32, %rsp
         RESTORE_XMM
         ret
@@ -47,7 +84,9 @@ ENTRY(efi_call1)
         SAVE_XMM
         subq $32, %rsp
         mov %rsi, %rcx
+        SWITCH_PGT
         call *%rdi
+        RESTORE_PGT
         addq $32, %rsp
         RESTORE_XMM
         ret
@@ -57,7 +96,9 @@ ENTRY(efi_call2)
         SAVE_XMM
         subq $32, %rsp
         mov %rsi, %rcx
+        SWITCH_PGT
         call *%rdi
+        RESTORE_PGT
         addq $32, %rsp
         RESTORE_XMM
         ret
@@ -68,7 +109,9 @@ ENTRY(efi_call3)
         subq $32, %rsp
         mov %rcx, %r8
         mov %rsi, %rcx
+        SWITCH_PGT
         call *%rdi
+        RESTORE_PGT
         addq $32, %rsp
         RESTORE_XMM
         ret
@@ -80,7 +123,9 @@ ENTRY(efi_call4)
         mov %r8, %r9
         mov %rcx, %r8
         mov %rsi, %rcx
+        SWITCH_PGT
         call *%rdi
+        RESTORE_PGT
         addq $32, %rsp
         RESTORE_XMM
         ret
@@ -93,7 +138,9 @@ ENTRY(efi_call5)
         mov %r8, %r9
         mov %rcx, %r8
         mov %rsi, %rcx
+        SWITCH_PGT
         call *%rdi
+        RESTORE_PGT
         addq $48, %rsp
         RESTORE_XMM
         ret
@@ -109,8 +156,15 @@ ENTRY(efi_call6)
         mov %r8, %r9
         mov %rcx, %r8
         mov %rsi, %rcx
+        SWITCH_PGT
         call *%rdi
+        RESTORE_PGT
         addq $48, %rsp
         RESTORE_XMM
         ret
 ENDPROC(efi_call6)
+
+        .data
+ENTRY(efi_scratch)
+        .fill 3,8,0
+        .byte 0
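
A closing note on the FLUSH_TLB_ALL macro above: "andb $0x7f, %r14b" clears bit 7 of CR4, which is CR4.PGE, and writing CR4 with PGE toggled off and then back on invalidates the TLB including global entries, which is needed because RESTORE_PGT has just switched %cr3 back to the previous page table. In C-style pseudo-code (ring-0 only; GCC inline asm shown purely for illustration):

#define X86_CR4_PGE (1UL << 7)  /* the bit cleared by "andb $0x7f, %r14b" */

static inline unsigned long read_cr4(void)
{
        unsigned long cr4;

        asm volatile("mov %%cr4, %0" : "=r" (cr4));
        return cr4;
}

static inline void write_cr4(unsigned long val)
{
        asm volatile("mov %0, %%cr4" : : "r" (val) : "memory");
}

/* What FLUSH_TLB_ALL boils down to. */
static inline void flush_tlb_all_sketch(void)
{
        unsigned long cr4 = read_cr4();

        write_cr4(cr4 & ~X86_CR4_PGE);  /* PGE off: all TLB entries, incl. global, dropped */
        write_cr4(cr4);                 /* PGE back on */
}

The surrounding movq instructions only preserve %r15 and %r14 in the efi_scratch slots so the macro has scratch registers to work with without touching the stack.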