diff options
| author | Jeremy Fitzhardinge <jeremy@goop.org> | 2009-10-12 19:32:43 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-10-13 03:22:01 -0400 |
| commit | 71999d9862e667f1fd14f8fbfa0cce6d855bad3f (patch) | |
| tree | b228a66c29154a446dc3adaf6f0dae8ee4b908d3 /arch/x86 | |
| parent | d1705c558c95418378b11a0be963fe1b3e2fa381 (diff) | |
x86/paravirt: Use normal calling sequences for irq enable/disable
Bastian Blank reported a boot crash with stackprotector enabled,
and debugged it back to edx register corruption.
For historical reasons irq enable/disable/save/restore had special
calling sequences to make them more efficient. With the more
recent introduction of higher-level and more general optimisations
this is no longer necessary so we can just use the normal PVOP_
macros.
This fixes some residual bugs in the old implementations which left
edx liable to inadvertent clobbering. Also, fix some bugs in
__PVOP_VCALLEESAVE which were revealed by actual use.
Reported-by: Bastian Blank <bastian@waldi.eu.org>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stable Kernel <stable@kernel.org>
Cc: Xen-devel <xen-devel@lists.xensource.com>
LKML-Reference: <4AD3BC9B.7040501@goop.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
| -rw-r--r-- | arch/x86/include/asm/paravirt.h | 28 | ||||
| -rw-r--r-- | arch/x86/include/asm/paravirt_types.h | 10 |
2 files changed, 10 insertions, 28 deletions
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 8aebcc41041d..efb38994859c 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -840,42 +840,22 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
| 840 | 840 | ||
| 841 | static inline unsigned long __raw_local_save_flags(void) | 841 | static inline unsigned long __raw_local_save_flags(void) |
| 842 | { | 842 | { |
| 843 | unsigned long f; | 843 | return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); |
| 844 | |||
| 845 | asm volatile(paravirt_alt(PARAVIRT_CALL) | ||
| 846 | : "=a"(f) | ||
| 847 | : paravirt_type(pv_irq_ops.save_fl), | ||
| 848 | paravirt_clobber(CLBR_EAX) | ||
| 849 | : "memory", "cc"); | ||
| 850 | return f; | ||
| 851 | } | 844 | } |
| 852 | 845 | ||
| 853 | static inline void raw_local_irq_restore(unsigned long f) | 846 | static inline void raw_local_irq_restore(unsigned long f) |
| 854 | { | 847 | { |
| 855 | asm volatile(paravirt_alt(PARAVIRT_CALL) | 848 | PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); |
| 856 | : "=a"(f) | ||
| 857 | : PV_FLAGS_ARG(f), | ||
| 858 | paravirt_type(pv_irq_ops.restore_fl), | ||
| 859 | paravirt_clobber(CLBR_EAX) | ||
| 860 | : "memory", "cc"); | ||
| 861 | } | 849 | } |
| 862 | 850 | ||
| 863 | static inline void raw_local_irq_disable(void) | 851 | static inline void raw_local_irq_disable(void) |
| 864 | { | 852 | { |
| 865 | asm volatile(paravirt_alt(PARAVIRT_CALL) | 853 | PVOP_VCALLEE0(pv_irq_ops.irq_disable); |
| 866 | : | ||
| 867 | : paravirt_type(pv_irq_ops.irq_disable), | ||
| 868 | paravirt_clobber(CLBR_EAX) | ||
| 869 | : "memory", "eax", "cc"); | ||
| 870 | } | 854 | } |
| 871 | 855 | ||
| 872 | static inline void raw_local_irq_enable(void) | 856 | static inline void raw_local_irq_enable(void) |
| 873 | { | 857 | { |
| 874 | asm volatile(paravirt_alt(PARAVIRT_CALL) | 858 | PVOP_VCALLEE0(pv_irq_ops.irq_enable); |
| 875 | : | ||
| 876 | : paravirt_type(pv_irq_ops.irq_enable), | ||
| 877 | paravirt_clobber(CLBR_EAX) | ||
| 878 | : "memory", "eax", "cc"); | ||
| 879 | } | 859 | } |
| 880 | 860 | ||
| 881 | static inline unsigned long __raw_local_irq_save(void) | 861 | static inline unsigned long __raw_local_irq_save(void) |
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index dd0f5b32489d..9357473c8da0 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -494,10 +494,11 @@ int paravirt_disable_iospace(void);
| 494 | #define EXTRA_CLOBBERS | 494 | #define EXTRA_CLOBBERS |
| 495 | #define VEXTRA_CLOBBERS | 495 | #define VEXTRA_CLOBBERS |
| 496 | #else /* CONFIG_X86_64 */ | 496 | #else /* CONFIG_X86_64 */ |
| 497 | /* [re]ax isn't an arg, but the return val */ | ||
| 497 | #define PVOP_VCALL_ARGS \ | 498 | #define PVOP_VCALL_ARGS \ |
| 498 | unsigned long __edi = __edi, __esi = __esi, \ | 499 | unsigned long __edi = __edi, __esi = __esi, \ |
| 499 | __edx = __edx, __ecx = __ecx | 500 | __edx = __edx, __ecx = __ecx, __eax = __eax |
| 500 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax | 501 | #define PVOP_CALL_ARGS PVOP_VCALL_ARGS |
| 501 | 502 | ||
| 502 | #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) | 503 | #define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x)) |
| 503 | #define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x)) | 504 | #define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x)) |
| @@ -509,6 +510,7 @@ int paravirt_disable_iospace(void); | |||
| 509 | "=c" (__ecx) | 510 | "=c" (__ecx) |
| 510 | #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) | 511 | #define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax) |
| 511 | 512 | ||
| 513 | /* void functions are still allowed [re]ax for scratch */ | ||
| 512 | #define PVOP_VCALLEE_CLOBBERS "=a" (__eax) | 514 | #define PVOP_VCALLEE_CLOBBERS "=a" (__eax) |
| 513 | #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS | 515 | #define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS |
| 514 | 516 | ||
| @@ -583,8 +585,8 @@ int paravirt_disable_iospace(void); | |||
| 583 | VEXTRA_CLOBBERS, \ | 585 | VEXTRA_CLOBBERS, \ |
| 584 | pre, post, ##__VA_ARGS__) | 586 | pre, post, ##__VA_ARGS__) |
| 585 | 587 | ||
| 586 | #define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \ | 588 | #define __PVOP_VCALLEESAVE(op, pre, post, ...) \ |
| 587 | ____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \ | 589 | ____PVOP_VCALL(op.func, CLBR_RET_REG, \ |
| 588 | PVOP_VCALLEE_CLOBBERS, , \ | 590 | PVOP_VCALLEE_CLOBBERS, , \ |
| 589 | pre, post, ##__VA_ARGS__) | 591 | pre, post, ##__VA_ARGS__) |
| 590 | 592 | ||
