Diffstat (limited to 'arch/x86/include')

 -rw-r--r--  arch/x86/include/asm/efi.h          | 100
 -rw-r--r--  arch/x86/include/asm/fpu-internal.h |  10

 2 files changed, 39 insertions(+), 71 deletions(-)
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 0869434eaf72..1eb5f6433ad8 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_EFI_H
 #define _ASM_X86_EFI_H
 
+#include <asm/i387.h>
 /*
  * We map the EFI regions needed for runtime services non-contiguously,
  * with preserved alignment on virtual addresses starting from -4G down
@@ -27,91 +28,58 @@
 
 extern unsigned long asmlinkage efi_call_phys(void *, ...);
 
-#define efi_call_phys0(f)		efi_call_phys(f)
-#define efi_call_phys1(f, a1)		efi_call_phys(f, a1)
-#define efi_call_phys2(f, a1, a2)	efi_call_phys(f, a1, a2)
-#define efi_call_phys3(f, a1, a2, a3)	efi_call_phys(f, a1, a2, a3)
-#define efi_call_phys4(f, a1, a2, a3, a4)	\
-	efi_call_phys(f, a1, a2, a3, a4)
-#define efi_call_phys5(f, a1, a2, a3, a4, a5)	\
-	efi_call_phys(f, a1, a2, a3, a4, a5)
-#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6)	\
-	efi_call_phys(f, a1, a2, a3, a4, a5, a6)
 /*
  * Wrap all the virtual calls in a way that forces the parameters on the stack.
  */
 
+/* Use this macro if your virtual returns a non-void value */
 #define efi_call_virt(f, args...) \
-	((efi_##f##_t __attribute__((regparm(0)))*)efi.systab->runtime->f)(args)
-
-#define efi_call_virt0(f)		efi_call_virt(f)
-#define efi_call_virt1(f, a1)		efi_call_virt(f, a1)
-#define efi_call_virt2(f, a1, a2)	efi_call_virt(f, a1, a2)
-#define efi_call_virt3(f, a1, a2, a3)	efi_call_virt(f, a1, a2, a3)
-#define efi_call_virt4(f, a1, a2, a3, a4)	\
-	efi_call_virt(f, a1, a2, a3, a4)
-#define efi_call_virt5(f, a1, a2, a3, a4, a5)	\
-	efi_call_virt(f, a1, a2, a3, a4, a5)
-#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
-	efi_call_virt(f, a1, a2, a3, a4, a5, a6)
+({									\
+	efi_status_t __s;						\
+	kernel_fpu_begin();						\
+	__s = ((efi_##f##_t __attribute__((regparm(0)))*)		\
+		efi.systab->runtime->f)(args);				\
+	kernel_fpu_end();						\
+	__s;								\
+})
+
+/* Use this macro if your virtual call does not return any value */
+#define __efi_call_virt(f, args...) \
+({									\
+	kernel_fpu_begin();						\
+	((efi_##f##_t __attribute__((regparm(0)))*)			\
+		efi.systab->runtime->f)(args);				\
+	kernel_fpu_end();						\
+})
 
 #define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)
 
 #else /* !CONFIG_X86_32 */
 
-extern u64 efi_call0(void *fp);
-extern u64 efi_call1(void *fp, u64 arg1);
-extern u64 efi_call2(void *fp, u64 arg1, u64 arg2);
-extern u64 efi_call3(void *fp, u64 arg1, u64 arg2, u64 arg3);
-extern u64 efi_call4(void *fp, u64 arg1, u64 arg2, u64 arg3, u64 arg4);
-extern u64 efi_call5(void *fp, u64 arg1, u64 arg2, u64 arg3,
-		     u64 arg4, u64 arg5);
-extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
-		     u64 arg4, u64 arg5, u64 arg6);
-
-#define efi_call_phys0(f)			\
-	efi_call0((f))
-#define efi_call_phys1(f, a1)			\
-	efi_call1((f), (u64)(a1))
-#define efi_call_phys2(f, a1, a2)		\
-	efi_call2((f), (u64)(a1), (u64)(a2))
-#define efi_call_phys3(f, a1, a2, a3)		\
-	efi_call3((f), (u64)(a1), (u64)(a2), (u64)(a3))
-#define efi_call_phys4(f, a1, a2, a3, a4)	\
-	efi_call4((f), (u64)(a1), (u64)(a2), (u64)(a3),	\
-		  (u64)(a4))
-#define efi_call_phys5(f, a1, a2, a3, a4, a5)	\
-	efi_call5((f), (u64)(a1), (u64)(a2), (u64)(a3),	\
-		  (u64)(a4), (u64)(a5))
-#define efi_call_phys6(f, a1, a2, a3, a4, a5, a6)	\
-	efi_call6((f), (u64)(a1), (u64)(a2), (u64)(a3),	\
-		  (u64)(a4), (u64)(a5), (u64)(a6))
-
-#define _efi_call_virtX(x, f, ...)					\
+#define EFI_LOADER_SIGNATURE		"EL64"
+
+extern u64 asmlinkage efi_call(void *fp, ...);
+
+#define efi_call_phys(f, args...)		efi_call((f), args)
+
+#define efi_call_virt(f, ...)						\
 ({									\
 	efi_status_t __s;						\
 									\
 	efi_sync_low_kernel_mappings();					\
 	preempt_disable();						\
-	__s = efi_call##x((void *)efi.systab->runtime->f, __VA_ARGS__);	\
+	__kernel_fpu_begin();						\
+	__s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);	\
+	__kernel_fpu_end();						\
 	preempt_enable();						\
 	__s;								\
 })
 
-#define efi_call_virt0(f)				\
-	_efi_call_virtX(0, f)
-#define efi_call_virt1(f, a1)				\
-	_efi_call_virtX(1, f, (u64)(a1))
-#define efi_call_virt2(f, a1, a2)			\
-	_efi_call_virtX(2, f, (u64)(a1), (u64)(a2))
-#define efi_call_virt3(f, a1, a2, a3)			\
-	_efi_call_virtX(3, f, (u64)(a1), (u64)(a2), (u64)(a3))
-#define efi_call_virt4(f, a1, a2, a3, a4)		\
-	_efi_call_virtX(4, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4))
-#define efi_call_virt5(f, a1, a2, a3, a4, a5)		\
-	_efi_call_virtX(5, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5))
-#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)	\
-	_efi_call_virtX(6, f, (u64)(a1), (u64)(a2), (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
+/*
+ * All X86_64 virt calls return non-void values. Thus, use non-void call for
+ * virt calls that would be void on X86_32.
+ */
+#define __efi_call_virt(f, args...) efi_call_virt(f, args)
 
 extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
 				 u32 type, u64 attribute);
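
For reference, below is a minimal user-space sketch of the wrapper pattern the new efi_call_virt() macros introduce above: bracket each runtime-services call with an FPU save/restore pair and hand the call's status back out of a GNU C statement expression (the 64-bit variant additionally calls efi_sync_low_kernel_mappings() and disables preemption around the call). The fake runtime table and the fpu_begin()/fpu_end() stubs are hypothetical stand-ins for efi.systab->runtime and kernel_fpu_begin()/kernel_fpu_end(); it builds with gcc or clang, which support statement expressions, as the kernel does.

	#include <stdio.h>

	typedef unsigned long efi_status_t;
	#define EFI_SUCCESS 0

	/* Fake runtime-services table, standing in for efi.systab->runtime. */
	struct runtime_services {
		efi_status_t (*get_time)(int *hour);
	};

	static efi_status_t stub_get_time(int *hour)
	{
		*hour = 12;
		return EFI_SUCCESS;
	}

	static struct runtime_services runtime = { .get_time = stub_get_time };

	/* Stand-ins for kernel_fpu_begin()/kernel_fpu_end(). */
	static void fpu_begin(void) { puts("fpu state saved"); }
	static void fpu_end(void)   { puts("fpu state restored"); }

	/*
	 * Mirrors the shape of the 32-bit efi_call_virt(): save FPU state,
	 * make the call through the table, restore FPU state, yield status.
	 */
	#define call_virt(f, args...)					\
	({								\
		efi_status_t __s;					\
		fpu_begin();						\
		__s = runtime.f(args);					\
		fpu_end();						\
		__s;							\
	})

	int main(void)
	{
		int hour;
		efi_status_t s = call_virt(get_time, &hour);

		printf("status=%lu hour=%d\n", s, hour);
		return s == EFI_SUCCESS ? 0 : 1;
	}
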
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index cea1c76d49bf..115e3689cd53 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -87,22 +87,22 @@ static inline int is_x32_frame(void)
 
 static __always_inline __pure bool use_eager_fpu(void)
 {
-	return static_cpu_has(X86_FEATURE_EAGER_FPU);
+	return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
 }
 
 static __always_inline __pure bool use_xsaveopt(void)
 {
-	return static_cpu_has(X86_FEATURE_XSAVEOPT);
+	return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
 }
 
 static __always_inline __pure bool use_xsave(void)
 {
-	return static_cpu_has(X86_FEATURE_XSAVE);
+	return static_cpu_has_safe(X86_FEATURE_XSAVE);
 }
 
 static __always_inline __pure bool use_fxsr(void)
 {
-	return static_cpu_has(X86_FEATURE_FXSR);
+	return static_cpu_has_safe(X86_FEATURE_FXSR);
 }
 
 static inline void fx_finit(struct i387_fxsave_struct *fx)
@@ -293,7 +293,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
 	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
 	   is pending.  Clear the x87 state here by setting it to fixed
 	   values. "m" is a random variable that should be in L1 */
-	if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) {
+	if (unlikely(static_cpu_has_safe(X86_FEATURE_FXSAVE_LEAK))) {
 		asm volatile(
 			"fnclex\n\t"
 			"emms\n\t"