-rw-r--r--  arch/x86/kernel/asm-offsets_32.c |   2
-rw-r--r--  arch/x86/kernel/entry_32.S       |   8
-rw-r--r--  arch/x86/kernel/paravirt_32.c    |  10
-rw-r--r--  arch/x86/kernel/vmi_32.c         |   4
-rw-r--r--  arch/x86/xen/enlighten.c         |   2
-rw-r--r--  include/asm-x86/irqflags.h       | 246
-rw-r--r--  include/asm-x86/irqflags_32.h    | 195
-rw-r--r--  include/asm-x86/irqflags_64.h    | 174
-rw-r--r--  include/asm-x86/paravirt.h       |   9
9 files changed, 261 insertions, 389 deletions
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index 0e45981b2dd7..c1ccfabb4a9e 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -123,7 +123,7 @@ void foo(void)
 	OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
 	OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
 	OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
-	OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+	OFFSET(PV_CPU_irq_enable_syscall_ret, pv_cpu_ops, irq_enable_syscall_ret);
 	OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
 #endif
 
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index dc7f938e5015..d63609dd64b9 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -58,7 +58,7 @@
  * for paravirtualization. The following will never clobber any registers:
  *	INTERRUPT_RETURN (aka. "iret")
  *	GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
- *	ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
+ *	ENABLE_INTERRUPTS_SYSCALL_RET (aka "sti; sysexit").
  *
  * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
  * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
@@ -351,7 +351,7 @@ sysenter_past_esp:
 	xorl %ebp,%ebp
 	TRACE_IRQS_ON
 1:	mov PT_FS(%esp), %fs
-	ENABLE_INTERRUPTS_SYSEXIT
+	ENABLE_INTERRUPTS_SYSCALL_RET
 	CFI_ENDPROC
 .pushsection .fixup,"ax"
 2:	movl $0,PT_FS(%esp)
@@ -882,10 +882,10 @@ ENTRY(native_iret)
 .previous
 END(native_iret)
 
-ENTRY(native_irq_enable_sysexit)
+ENTRY(native_irq_enable_syscall_ret)
 	sti
 	sysexit
-END(native_irq_enable_sysexit)
+END(native_irq_enable_syscall_ret)
 #endif
 
 KPROBE_ENTRY(int3)
diff --git a/arch/x86/kernel/paravirt_32.c b/arch/x86/kernel/paravirt_32.c
index f5000799f8ef..706b0562ea40 100644
--- a/arch/x86/kernel/paravirt_32.c
+++ b/arch/x86/kernel/paravirt_32.c
@@ -60,7 +60,7 @@ DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
 DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
 DEF_NATIVE(pv_cpu_ops, iret, "iret");
-DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
+DEF_NATIVE(pv_cpu_ops, irq_enable_syscall_ret, "sti; sysexit");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
@@ -88,7 +88,7 @@ static unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
 		SITE(pv_irq_ops, restore_fl);
 		SITE(pv_irq_ops, save_fl);
 		SITE(pv_cpu_ops, iret);
-		SITE(pv_cpu_ops, irq_enable_sysexit);
+		SITE(pv_cpu_ops, irq_enable_syscall_ret);
 		SITE(pv_mmu_ops, read_cr2);
 		SITE(pv_mmu_ops, read_cr3);
 		SITE(pv_mmu_ops, write_cr3);
@@ -186,7 +186,7 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
 		/* If the operation is a nop, then nop the callsite */
 		ret = paravirt_patch_nop();
 	else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
-		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit))
+		 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret))
 		/* If operation requires a jmp, then jmp */
 		ret = paravirt_patch_jmp(insnbuf, opfunc, addr, len);
 	else
@@ -237,7 +237,7 @@ static void native_flush_tlb_single(unsigned long addr)
 
 /* These are in entry.S */
 extern void native_iret(void);
-extern void native_irq_enable_sysexit(void);
+extern void native_irq_enable_syscall_ret(void);
 
 static int __init print_banner(void)
 {
@@ -384,7 +384,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 	.write_idt_entry = write_dt_entry,
 	.load_esp0 = native_load_esp0,
 
-	.irq_enable_sysexit = native_irq_enable_sysexit,
+	.irq_enable_syscall_ret = native_irq_enable_syscall_ret,
 	.iret = native_iret,
 
 	.set_iopl_mask = native_set_iopl_mask,
diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
index f02bad68abaa..aacce426cbd0 100644
--- a/arch/x86/kernel/vmi_32.c
+++ b/arch/x86/kernel/vmi_32.c
@@ -148,7 +148,7 @@ static unsigned vmi_patch(u8 type, u16 clobbers, void *insns,
 				      insns, eip);
 	case PARAVIRT_PATCH(pv_cpu_ops.iret):
 		return patch_internal(VMI_CALL_IRET, len, insns, eip);
-	case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit):
+	case PARAVIRT_PATCH(pv_cpu_ops.irq_enable_syscall_ret):
 		return patch_internal(VMI_CALL_SYSEXIT, len, insns, eip);
 	default:
 		break;
@@ -870,7 +870,7 @@ static inline int __init activate_vmi(void)
 	 * the backend. They are performance critical anyway, so requiring
 	 * a patch is not a big problem.
 	 */
-	pv_cpu_ops.irq_enable_sysexit = (void *)0xfeedbab0;
+	pv_cpu_ops.irq_enable_syscall_ret = (void *)0xfeedbab0;
 	pv_cpu_ops.iret = (void *)0xbadbab0;
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 00829401389e..d3574485cb15 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -953,7 +953,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
 	.read_pmc = native_read_pmc,
 
 	.iret = (void *)&hypercall_page[__HYPERVISOR_iret],
-	.irq_enable_sysexit = NULL,  /* never called */
+	.irq_enable_syscall_ret = NULL,  /* never called */
 
 	.load_tr_desc = paravirt_nop,
 	.set_ldt = xen_set_ldt,
diff --git a/include/asm-x86/irqflags.h b/include/asm-x86/irqflags.h
index 1b695ff52687..92021c1ffa3a 100644
--- a/include/asm-x86/irqflags.h
+++ b/include/asm-x86/irqflags.h
@@ -1,5 +1,245 @@
-#ifdef CONFIG_X86_32
-# include "irqflags_32.h"
+#ifndef _X86_IRQFLAGS_H_
+#define _X86_IRQFLAGS_H_
+
+#include <asm/processor-flags.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * Interrupt control:
+ */
+
+static inline unsigned long native_save_fl(void)
+{
+	unsigned long flags;
+
+	__asm__ __volatile__(
+		"# __raw_save_flags\n\t"
+		"pushf ; pop %0"
+		: "=g" (flags)
+		: /* no input */
+		: "memory"
+	);
+
+	return flags;
+}
+
+static inline void native_restore_fl(unsigned long flags)
+{
+	__asm__ __volatile__(
+		"push %0 ; popf"
+		: /* no output */
+		:"g" (flags)
+		:"memory", "cc"
+	);
+}
+
+static inline void native_irq_disable(void)
+{
+	asm volatile("cli": : :"memory");
+}
+
+static inline void native_irq_enable(void)
+{
+	asm volatile("sti": : :"memory");
+}
+
+static inline void native_safe_halt(void)
+{
+	asm volatile("sti; hlt": : :"memory");
+}
+
+static inline void native_halt(void)
+{
+	asm volatile("hlt": : :"memory");
+}
+
+#endif
+
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#else
+#ifndef __ASSEMBLY__
+
+static inline unsigned long __raw_local_save_flags(void)
+{
+	return native_save_fl();
+}
+
+static inline void raw_local_irq_restore(unsigned long flags)
+{
+	native_restore_fl(flags);
+}
+
+static inline void raw_local_irq_disable(void)
+{
+	native_irq_disable();
+}
+
+static inline void raw_local_irq_enable(void)
+{
+	native_irq_enable();
+}
+
+/*
+ * Used in the idle loop; sti takes one instruction cycle
+ * to complete:
+ */
+static inline void raw_safe_halt(void)
+{
+	native_safe_halt();
+}
+
+/*
+ * Used when interrupts are already enabled or to
+ * shutdown the processor:
+ */
+static inline void halt(void)
+{
+	native_halt();
+}
+
+/*
+ * For spinlocks, etc:
+ */
+static inline unsigned long __raw_local_irq_save(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	raw_local_irq_disable();
+
+	return flags;
+}
+#else
+
+#define ENABLE_INTERRUPTS(x)	sti
+#define DISABLE_INTERRUPTS(x)	cli
+
+#ifdef CONFIG_X86_64
+#define INTERRUPT_RETURN	iretq
+#define ENABLE_INTERRUPTS_SYSCALL_RET			\
+			movq	%gs:pda_oldrsp, %rsp;	\
+			swapgs;				\
+			sysretq;
+#else
+#define INTERRUPT_RETURN		iret
+#define ENABLE_INTERRUPTS_SYSCALL_RET	sti; sysexit
+#define GET_CR0_INTO_EAX		movl %cr0, %eax
+#endif
+
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_PARAVIRT */
+
+#ifndef __ASSEMBLY__
+#define raw_local_save_flags(flags) \
+	do { (flags) = __raw_local_save_flags(); } while (0)
+
+#define raw_local_irq_save(flags) \
+	do { (flags) = __raw_local_irq_save(); } while (0)
+
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & X86_EFLAGS_IF);
+}
+
+static inline int raw_irqs_disabled(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	return raw_irqs_disabled_flags(flags);
+}
+
+/*
+ * makes the traced hardirq state match with the machine state
+ *
+ * should be a rarely used function, only in places where its
+ * otherwise impossible to know the irq state, like in traps.
+ */
+static inline void trace_hardirqs_fixup_flags(unsigned long flags)
+{
+	if (raw_irqs_disabled_flags(flags))
+		trace_hardirqs_off();
+	else
+		trace_hardirqs_on();
+}
+
+static inline void trace_hardirqs_fixup(void)
+{
+	unsigned long flags = __raw_local_save_flags();
+
+	trace_hardirqs_fixup_flags(flags);
+}
+
 #else
-# include "irqflags_64.h"
+
+#ifdef CONFIG_X86_64
+/*
+ * Currently paravirt can't handle swapgs nicely when we
+ * don't have a stack we can rely on (such as a user space
+ * stack).  So we either find a way around these or just fault
+ * and emulate if a guest tries to call swapgs directly.
+ *
+ * Either way, this is a good way to document that we don't
+ * have a reliable stack. x86_64 only.
+ */
+#define SWAPGS_UNSAFE_STACK	swapgs
+#define ARCH_TRACE_IRQS_ON	call trace_hardirqs_on_thunk
+#define ARCH_TRACE_IRQS_OFF	call trace_hardirqs_off_thunk
+#define ARCH_LOCKDEP_SYS_EXIT	call lockdep_sys_exit_thunk
+#define ARCH_LOCKDEP_SYS_EXIT_IRQ	\
+	TRACE_IRQS_ON; \
+	sti; \
+	SAVE_REST; \
+	LOCKDEP_SYS_EXIT; \
+	RESTORE_REST; \
+	cli; \
+	TRACE_IRQS_OFF;
+
+#else
+#define ARCH_TRACE_IRQS_ON			\
+	pushl %eax;				\
+	pushl %ecx;				\
+	pushl %edx;				\
+	call trace_hardirqs_on;			\
+	popl %edx;				\
+	popl %ecx;				\
+	popl %eax;
+
+#define ARCH_TRACE_IRQS_OFF			\
+	pushl %eax;				\
+	pushl %ecx;				\
+	pushl %edx;				\
+	call trace_hardirqs_off;		\
+	popl %edx;				\
+	popl %ecx;				\
+	popl %eax;
+
+#define ARCH_LOCKDEP_SYS_EXIT			\
+	pushl %eax;				\
+	pushl %ecx;				\
+	pushl %edx;				\
+	call lockdep_sys_exit;			\
+	popl %edx;				\
+	popl %ecx;				\
+	popl %eax;
+
+#define ARCH_LOCKDEP_SYS_EXIT_IRQ
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+# define TRACE_IRQS_ON		ARCH_TRACE_IRQS_ON
+# define TRACE_IRQS_OFF		ARCH_TRACE_IRQS_OFF
+#else
+# define TRACE_IRQS_ON
+# define TRACE_IRQS_OFF
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCKDEP_SYS_EXIT	ARCH_LOCKDEP_SYS_EXIT
+# define LOCKDEP_SYS_EXIT_IRQ	ARCH_LOCKDEP_SYS_EXIT_IRQ
+# else
+# define LOCKDEP_SYS_EXIT
+# define LOCKDEP_SYS_EXIT_IRQ
+# endif
+
+#endif /* __ASSEMBLY__ */
 #endif
diff --git a/include/asm-x86/irqflags_32.h b/include/asm-x86/irqflags_32.h
deleted file mode 100644
index 98b21b9bdce8..000000000000
--- a/include/asm-x86/irqflags_32.h
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * IRQ flags handling
- *
- * This file gets included from lowlevel asm headers too, to provide
- * wrapped versions of the local_irq_*() APIs, based on the
- * raw_local_irq_*() functions from the lowlevel headers.
- */
-#ifndef _ASM_IRQFLAGS_H
-#define _ASM_IRQFLAGS_H
-#include <asm/processor-flags.h>
-
-#ifndef __ASSEMBLY__
-static inline unsigned long native_save_fl(void)
-{
-	unsigned long f;
-	asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
-	return f;
-}
-
-static inline void native_restore_fl(unsigned long f)
-{
-	asm volatile("pushl %0 ; popfl": /* no output */
-			     :"g" (f)
-			     :"memory", "cc");
-}
-
-static inline void native_irq_disable(void)
-{
-	asm volatile("cli": : :"memory");
-}
-
-static inline void native_irq_enable(void)
-{
-	asm volatile("sti": : :"memory");
-}
-
-static inline void native_safe_halt(void)
-{
-	asm volatile("sti; hlt": : :"memory");
-}
-
-static inline void native_halt(void)
-{
-	asm volatile("hlt": : :"memory");
-}
-#endif /* __ASSEMBLY__ */
-
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#else
-#ifndef __ASSEMBLY__
-
-static inline unsigned long __raw_local_save_flags(void)
-{
-	return native_save_fl();
-}
-
-static inline void raw_local_irq_restore(unsigned long flags)
-{
-	native_restore_fl(flags);
-}
-
-static inline void raw_local_irq_disable(void)
-{
-	native_irq_disable();
-}
-
-static inline void raw_local_irq_enable(void)
-{
-	native_irq_enable();
-}
-
-/*
- * Used in the idle loop; sti takes one instruction cycle
- * to complete:
- */
-static inline void raw_safe_halt(void)
-{
-	native_safe_halt();
-}
-
-/*
- * Used when interrupts are already enabled or to
- * shutdown the processor:
- */
-static inline void halt(void)
-{
-	native_halt();
-}
-
-/*
- * For spinlocks, etc:
- */
-static inline unsigned long __raw_local_irq_save(void)
-{
-	unsigned long flags = __raw_local_save_flags();
-
-	raw_local_irq_disable();
-
-	return flags;
-}
-
-#else
-#define DISABLE_INTERRUPTS(clobbers)	cli
-#define ENABLE_INTERRUPTS(clobbers)	sti
-#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
-#define INTERRUPT_RETURN		iret
-#define GET_CR0_INTO_EAX		movl %cr0, %eax
-#endif /* __ASSEMBLY__ */
-#endif /* CONFIG_PARAVIRT */
-
-#ifndef __ASSEMBLY__
-#define raw_local_save_flags(flags) \
-	do { (flags) = __raw_local_save_flags(); } while (0)
-
-#define raw_local_irq_save(flags) \
-	do { (flags) = __raw_local_irq_save(); } while (0)
-
-static inline int raw_irqs_disabled_flags(unsigned long flags)
-{
-	return !(flags & X86_EFLAGS_IF);
-}
-
-static inline int raw_irqs_disabled(void)
-{
-	unsigned long flags = __raw_local_save_flags();
-
-	return raw_irqs_disabled_flags(flags);
-}
-
-/*
- * makes the traced hardirq state match with the machine state
- *
- * should be a rarely used function, only in places where its
- * otherwise impossible to know the irq state, like in traps.
- */
-static inline void trace_hardirqs_fixup_flags(unsigned long flags)
-{
-	if (raw_irqs_disabled_flags(flags))
-		trace_hardirqs_off();
-	else
-		trace_hardirqs_on();
-}
-
-static inline void trace_hardirqs_fixup(void)
-{
-	unsigned long flags = __raw_local_save_flags();
-
-	trace_hardirqs_fixup_flags(flags);
-}
-#endif /* __ASSEMBLY__ */
-
-/*
- * Do the CPU's IRQ-state tracing from assembly code. We call a
- * C function, so save all the C-clobbered registers:
- */
-#ifdef CONFIG_TRACE_IRQFLAGS
-
-# define TRACE_IRQS_ON				\
-	pushl %eax;				\
-	pushl %ecx;				\
-	pushl %edx;				\
-	call trace_hardirqs_on;			\
-	popl %edx;				\
-	popl %ecx;				\
-	popl %eax;
-
-# define TRACE_IRQS_OFF				\
-	pushl %eax;				\
-	pushl %ecx;				\
-	pushl %edx;				\
-	call trace_hardirqs_off;		\
-	popl %edx;				\
-	popl %ecx;				\
-	popl %eax;
-
-#else
-# define TRACE_IRQS_ON
-# define TRACE_IRQS_OFF
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define LOCKDEP_SYS_EXIT			\
-	pushl %eax;				\
-	pushl %ecx;				\
-	pushl %edx;				\
-	call lockdep_sys_exit;			\
-	popl %edx;				\
-	popl %ecx;				\
-	popl %eax;
-#else
-# define LOCKDEP_SYS_EXIT
-#endif
-
-#endif
diff --git a/include/asm-x86/irqflags_64.h b/include/asm-x86/irqflags_64.h
deleted file mode 100644
index 38c07db733cf..000000000000
--- a/include/asm-x86/irqflags_64.h
+++ /dev/null
@@ -1,174 +0,0 @@
-/*
- * IRQ flags handling
- *
- * This file gets included from lowlevel asm headers too, to provide
- * wrapped versions of the local_irq_*() APIs, based on the
- * raw_local_irq_*() functions from the lowlevel headers.
- */
-#ifndef _ASM_IRQFLAGS_H
-#define _ASM_IRQFLAGS_H
-#include <asm/processor-flags.h>
-
-#ifndef __ASSEMBLY__
-/*
- * Interrupt control:
- */
-
-static inline unsigned long __raw_local_save_flags(void)
-{
-	unsigned long flags;
-
-	__asm__ __volatile__(
-		"# __raw_save_flags\n\t"
-		"pushfq ; popq %q0"
-		: "=g" (flags)
-		: /* no input */
-		: "memory"
-	);
-
-	return flags;
-}
-
-#define raw_local_save_flags(flags) \
-	do { (flags) = __raw_local_save_flags(); } while (0)
-
-static inline void raw_local_irq_restore(unsigned long flags)
-{
-	__asm__ __volatile__(
-		"pushq %0 ; popfq"
-		: /* no output */
-		:"g" (flags)
-		:"memory", "cc"
-	);
-}
-
-#ifdef CONFIG_X86_VSMP
-
-/*
- * Interrupt control for the VSMP architecture:
- */
-
-static inline void raw_local_irq_disable(void)
-{
-	unsigned long flags = __raw_local_save_flags();
-
-	raw_local_irq_restore((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
-}
-
-static inline void raw_local_irq_enable(void)
-{
-	unsigned long flags = __raw_local_save_flags();
-
-	raw_local_irq_restore((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
-}
-
-static inline int raw_irqs_disabled_flags(unsigned long flags)
-{
-	return !(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC);
-}
-
-#else /* CONFIG_X86_VSMP */
-
-static inline void raw_local_irq_disable(void)
-{
-	__asm__ __volatile__("cli" : : : "memory");
-}
-
-static inline void raw_local_irq_enable(void)
-{
-	__asm__ __volatile__("sti" : : : "memory");
-}
-
-static inline int raw_irqs_disabled_flags(unsigned long flags)
-{
-	return !(flags & X86_EFLAGS_IF);
-}
-
-#endif
-
-/*
- * For spinlocks, etc.:
- */
-
-static inline unsigned long __raw_local_irq_save(void)
-{
-	unsigned long flags = __raw_local_save_flags();
-
-	raw_local_irq_disable();
-
-	return flags;
-}
-
-#define raw_local_irq_save(flags) \
-	do { (flags) = __raw_local_irq_save(); } while (0)
-
-static inline int raw_irqs_disabled(void)
-{
-	unsigned long flags = __raw_local_save_flags();
-
-	return raw_irqs_disabled_flags(flags);
-}
-
-/*
- * makes the traced hardirq state match with the machine state
- *
- * should be a rarely used function, only in places where its
- * otherwise impossible to know the irq state, like in traps.
- */
-static inline void trace_hardirqs_fixup_flags(unsigned long flags)
-{
-	if (raw_irqs_disabled_flags(flags))
-		trace_hardirqs_off();
-	else
-		trace_hardirqs_on();
-}
-
-static inline void trace_hardirqs_fixup(void)
-{
-	unsigned long flags = __raw_local_save_flags();
-
-	trace_hardirqs_fixup_flags(flags);
-}
-/*
- * Used in the idle loop; sti takes one instruction cycle
- * to complete:
- */
-static inline void raw_safe_halt(void)
-{
-	__asm__ __volatile__("sti; hlt" : : : "memory");
-}
-
-/*
- * Used when interrupts are already enabled or to
- * shutdown the processor:
- */
-static inline void halt(void)
-{
-	__asm__ __volatile__("hlt": : :"memory");
-}
-
-#else /* __ASSEMBLY__: */
-# ifdef CONFIG_TRACE_IRQFLAGS
-#  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk
-#  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk
-# else
-#  define TRACE_IRQS_ON
-#  define TRACE_IRQS_OFF
-# endif
-# ifdef CONFIG_DEBUG_LOCK_ALLOC
-#  define LOCKDEP_SYS_EXIT	call lockdep_sys_exit_thunk
-#  define LOCKDEP_SYS_EXIT_IRQ	\
-	TRACE_IRQS_ON; \
-	sti; \
-	SAVE_REST; \
-	LOCKDEP_SYS_EXIT; \
-	RESTORE_REST; \
-	cli; \
-	TRACE_IRQS_OFF;
-# else
-#  define LOCKDEP_SYS_EXIT
-#  define LOCKDEP_SYS_EXIT_IRQ
-# endif
-#endif
-
-#endif
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index 19fd3e67b08c..be7b934f6c54 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -121,7 +121,7 @@ struct pv_cpu_ops {
 	u64 (*read_pmc)(void);
 
 	/* These two are jmp to, not actually called. */
-	void (*irq_enable_sysexit)(void);
+	void (*irq_enable_syscall_ret)(void);
 	void (*iret)(void);
 
 	struct pv_lazy_ops lazy_mode;
@@ -1138,9 +1138,10 @@ static inline unsigned long __raw_local_irq_save(void)
 		  call *%cs:pv_irq_ops+PV_IRQ_irq_enable;		\
 		  popl %edx; popl %ecx; popl %eax)
 
-#define ENABLE_INTERRUPTS_SYSEXIT					\
-	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), CLBR_NONE,\
-		  jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_sysexit)
+#define ENABLE_INTERRUPTS_SYSCALL_RET					\
+	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_syscall_ret),\
+		  CLBR_NONE,						\
+		  jmp *%cs:pv_cpu_ops+PV_CPU_irq_enable_syscall_ret)
 
 #define GET_CR0_INTO_EAX					\
 	push %ecx; push %edx;					\
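
For reference, the consolidated irqflags.h added above exports the same raw_local_irq_save()/raw_local_irq_restore() pair on both 32-bit and 64-bit. A minimal usage sketch of that pattern follows; the function and counter here are hypothetical illustrations, not code from this patch.

/* Hypothetical caller: disable interrupts around a short critical
 * section and restore the previous interrupt state afterwards. */
static void example_update(unsigned long *counter)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* expands to (flags) = __raw_local_irq_save(): save EFLAGS, then cli */
	(*counter)++;			/* critical section runs with interrupts off */
	raw_local_irq_restore(flags);	/* re-enables interrupts only if they were enabled on entry */
}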