author      Ingo Molnar <mingo@kernel.org>    2015-06-08 03:49:11 -0400
committer   Ingo Molnar <mingo@kernel.org>    2015-06-08 03:54:24 -0400
commit      a49976d14f780942dafafbbf16f891c27d385ea0 (patch)
tree        6831a2f92bc19a5abf286dba2c7c6f10869cd527
parent      b2502b418e63fcde0fe1857732a476b5aa3789b1 (diff)
x86/asm/entry/32: Clean up entry_32.S
Make the 32-bit syscall entry code a bit more readable:
- use consistent assembly coding style similar to entry_64.S
- remove old comments that are not true anymore
- eliminate whitespace noise
- use consistent vertical spacing
- fix various comments
No code changed:
# arch/x86/entry/entry_32.o:
text data bss dec hex filename
6025 0 0 6025 1789 entry_32.o.before
6025 0 0 6025 1789 entry_32.o.after
md5:
f3fa16b2b0dca804f052deb6b30ba6cb entry_32.o.before.asm
f3fa16b2b0dca804f052deb6b30ba6cb entry_32.o.after.asm
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--   arch/x86/entry/entry_32.S | 1141
1 file changed, 570 insertions, 571 deletions
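The "No code changed" comparison above can be reproduced with standard binutils tools. The sketch below is an illustration only, not the exact commands used for this commit; the .before/.after/.asm file names simply follow the listing in the commit message:

    # before applying the patch:
    size arch/x86/entry/entry_32.o                              # text/data/bss table
    objdump -d arch/x86/entry/entry_32.o > entry_32.o.before.asm

    # after applying the patch and rebuilding:
    size arch/x86/entry/entry_32.o
    objdump -d arch/x86/entry/entry_32.o > entry_32.o.after.asm

    # identical checksums of the disassembly mean the generated code is unchanged
    md5sum entry_32.o.before.asm entry_32.o.after.asm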
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index d59461032625..edd7aadfacfa 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -1,23 +1,12 @@
1 | /* | 1 | /* |
2 | * Copyright (C) 1991,1992 Linus Torvalds | ||
2 | * | 3 | * |
3 | * Copyright (C) 1991, 1992 Linus Torvalds | 4 | * entry_32.S contains the system-call and low-level fault and trap handling routines. |
4 | */ | ||
5 | |||
6 | /* | ||
7 | * entry.S contains the system-call and fault low-level handling routines. | ||
8 | * This also contains the timer-interrupt handler, as well as all interrupts | ||
9 | * and faults that can result in a task-switch. | ||
10 | * | ||
11 | * NOTE: This code handles signal-recognition, which happens every time | ||
12 | * after a timer-interrupt and after each system call. | ||
13 | * | ||
14 | * I changed all the .align's to 4 (16 byte alignment), as that's faster | ||
15 | * on a 486. | ||
16 | * | 5 | * |
17 | * Stack layout in 'syscall_exit': | 6 | * Stack layout in 'syscall_exit': |
18 | * ptrace needs to have all regs on the stack. | 7 | * ptrace needs to have all registers on the stack. |
19 | * if the order here is changed, it needs to be | 8 | * If the order here is changed, it needs to be |
20 | * updated in fork.c:copy_process, signal.c:do_signal, | 9 | * updated in fork.c:copy_process(), signal.c:do_signal(), |
21 | * ptrace.c and ptrace.h | 10 | * ptrace.c and ptrace.h |
22 | * | 11 | * |
23 | * 0(%esp) - %ebx | 12 | * 0(%esp) - %ebx |
@@ -37,8 +26,6 @@
37 | * 38(%esp) - %eflags | 26 | * 38(%esp) - %eflags |
38 | * 3C(%esp) - %oldesp | 27 | * 3C(%esp) - %oldesp |
39 | * 40(%esp) - %oldss | 28 | * 40(%esp) - %oldss |
40 | * | ||
41 | * "current" is in register %ebx during any slow entries. | ||
42 | */ | 29 | */ |
43 | 30 | ||
44 | #include <linux/linkage.h> | 31 | #include <linux/linkage.h> |
@@ -61,11 +48,11 @@
61 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ | 48 | /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ |
62 | #include <linux/elf-em.h> | 49 | #include <linux/elf-em.h> |
63 | #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) | 50 | #define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE) |
64 | #define __AUDIT_ARCH_LE 0x40000000 | 51 | #define __AUDIT_ARCH_LE 0x40000000 |
65 | 52 | ||
66 | #ifndef CONFIG_AUDITSYSCALL | 53 | #ifndef CONFIG_AUDITSYSCALL |
67 | #define sysenter_audit syscall_trace_entry | 54 | # define sysenter_audit syscall_trace_entry |
68 | #define sysexit_audit syscall_exit_work | 55 | # define sysexit_audit syscall_exit_work |
69 | #endif | 56 | #endif |
70 | 57 | ||
71 | .section .entry.text, "ax" | 58 | .section .entry.text, "ax" |
@@ -84,16 +71,16 @@
84 | */ | 71 | */ |
85 | 72 | ||
86 | #ifdef CONFIG_PREEMPT | 73 | #ifdef CONFIG_PREEMPT |
87 | #define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF | 74 | # define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF |
88 | #else | 75 | #else |
89 | #define preempt_stop(clobbers) | 76 | # define preempt_stop(clobbers) |
90 | #define resume_kernel restore_all | 77 | # define resume_kernel restore_all |
91 | #endif | 78 | #endif |
92 | 79 | ||
93 | .macro TRACE_IRQS_IRET | 80 | .macro TRACE_IRQS_IRET |
94 | #ifdef CONFIG_TRACE_IRQFLAGS | 81 | #ifdef CONFIG_TRACE_IRQFLAGS |
95 | testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off? | 82 | testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off? |
96 | jz 1f | 83 | jz 1f |
97 | TRACE_IRQS_ON | 84 | TRACE_IRQS_ON |
98 | 1: | 85 | 1: |
99 | #endif | 86 | #endif |
@@ -112,10 +99,10 @@
112 | 99 | ||
113 | /* unfortunately push/pop can't be no-op */ | 100 | /* unfortunately push/pop can't be no-op */ |
114 | .macro PUSH_GS | 101 | .macro PUSH_GS |
115 | pushl $0 | 102 | pushl $0 |
116 | .endm | 103 | .endm |
117 | .macro POP_GS pop=0 | 104 | .macro POP_GS pop=0 |
118 | addl $(4 + \pop), %esp | 105 | addl $(4 + \pop), %esp |
119 | .endm | 106 | .endm |
120 | .macro POP_GS_EX | 107 | .macro POP_GS_EX |
121 | .endm | 108 | .endm |
@@ -135,119 +122,119 @@
135 | #else /* CONFIG_X86_32_LAZY_GS */ | 122 | #else /* CONFIG_X86_32_LAZY_GS */ |
136 | 123 | ||
137 | .macro PUSH_GS | 124 | .macro PUSH_GS |
138 | pushl %gs | 125 | pushl %gs |
139 | .endm | 126 | .endm |
140 | 127 | ||
141 | .macro POP_GS pop=0 | 128 | .macro POP_GS pop=0 |
142 | 98: popl %gs | 129 | 98: popl %gs |
143 | .if \pop <> 0 | 130 | .if \pop <> 0 |
144 | add $\pop, %esp | 131 | add $\pop, %esp |
145 | .endif | 132 | .endif |
146 | .endm | 133 | .endm |
147 | .macro POP_GS_EX | 134 | .macro POP_GS_EX |
148 | .pushsection .fixup, "ax" | 135 | .pushsection .fixup, "ax" |
149 | 99: movl $0, (%esp) | 136 | 99: movl $0, (%esp) |
150 | jmp 98b | 137 | jmp 98b |
151 | .popsection | 138 | .popsection |
152 | _ASM_EXTABLE(98b,99b) | 139 | _ASM_EXTABLE(98b, 99b) |
153 | .endm | 140 | .endm |
154 | 141 | ||
155 | .macro PTGS_TO_GS | 142 | .macro PTGS_TO_GS |
156 | 98: mov PT_GS(%esp), %gs | 143 | 98: mov PT_GS(%esp), %gs |
157 | .endm | 144 | .endm |
158 | .macro PTGS_TO_GS_EX | 145 | .macro PTGS_TO_GS_EX |
159 | .pushsection .fixup, "ax" | 146 | .pushsection .fixup, "ax" |
160 | 99: movl $0, PT_GS(%esp) | 147 | 99: movl $0, PT_GS(%esp) |
161 | jmp 98b | 148 | jmp 98b |
162 | .popsection | 149 | .popsection |
163 | _ASM_EXTABLE(98b,99b) | 150 | _ASM_EXTABLE(98b, 99b) |
164 | .endm | 151 | .endm |
165 | 152 | ||
166 | .macro GS_TO_REG reg | 153 | .macro GS_TO_REG reg |
167 | movl %gs, \reg | 154 | movl %gs, \reg |
168 | .endm | 155 | .endm |
169 | .macro REG_TO_PTGS reg | 156 | .macro REG_TO_PTGS reg |
170 | movl \reg, PT_GS(%esp) | 157 | movl \reg, PT_GS(%esp) |
171 | .endm | 158 | .endm |
172 | .macro SET_KERNEL_GS reg | 159 | .macro SET_KERNEL_GS reg |
173 | movl $(__KERNEL_STACK_CANARY), \reg | 160 | movl $(__KERNEL_STACK_CANARY), \reg |
174 | movl \reg, %gs | 161 | movl \reg, %gs |
175 | .endm | 162 | .endm |
176 | 163 | ||
177 | #endif /* CONFIG_X86_32_LAZY_GS */ | 164 | #endif /* CONFIG_X86_32_LAZY_GS */ |
178 | 165 | ||
179 | .macro SAVE_ALL | 166 | .macro SAVE_ALL |
180 | cld | 167 | cld |
181 | PUSH_GS | 168 | PUSH_GS |
182 | pushl %fs | 169 | pushl %fs |
183 | pushl %es | 170 | pushl %es |
184 | pushl %ds | 171 | pushl %ds |
185 | pushl %eax | 172 | pushl %eax |
186 | pushl %ebp | 173 | pushl %ebp |
187 | pushl %edi | 174 | pushl %edi |
188 | pushl %esi | 175 | pushl %esi |
189 | pushl %edx | 176 | pushl %edx |
190 | pushl %ecx | 177 | pushl %ecx |
191 | pushl %ebx | 178 | pushl %ebx |
192 | movl $(__USER_DS), %edx | 179 | movl $(__USER_DS), %edx |
193 | movl %edx, %ds | 180 | movl %edx, %ds |
194 | movl %edx, %es | 181 | movl %edx, %es |
195 | movl $(__KERNEL_PERCPU), %edx | 182 | movl $(__KERNEL_PERCPU), %edx |
196 | movl %edx, %fs | 183 | movl %edx, %fs |
197 | SET_KERNEL_GS %edx | 184 | SET_KERNEL_GS %edx |
198 | .endm | 185 | .endm |
199 | 186 | ||
200 | .macro RESTORE_INT_REGS | 187 | .macro RESTORE_INT_REGS |
201 | popl %ebx | 188 | popl %ebx |
202 | popl %ecx | 189 | popl %ecx |
203 | popl %edx | 190 | popl %edx |
204 | popl %esi | 191 | popl %esi |
205 | popl %edi | 192 | popl %edi |
206 | popl %ebp | 193 | popl %ebp |
207 | popl %eax | 194 | popl %eax |
208 | .endm | 195 | .endm |
209 | 196 | ||
210 | .macro RESTORE_REGS pop=0 | 197 | .macro RESTORE_REGS pop=0 |
211 | RESTORE_INT_REGS | 198 | RESTORE_INT_REGS |
212 | 1: popl %ds | 199 | 1: popl %ds |
213 | 2: popl %es | 200 | 2: popl %es |
214 | 3: popl %fs | 201 | 3: popl %fs |
215 | POP_GS \pop | 202 | POP_GS \pop |
216 | .pushsection .fixup, "ax" | 203 | .pushsection .fixup, "ax" |
217 | 4: movl $0, (%esp) | 204 | 4: movl $0, (%esp) |
218 | jmp 1b | 205 | jmp 1b |
219 | 5: movl $0, (%esp) | 206 | 5: movl $0, (%esp) |
220 | jmp 2b | 207 | jmp 2b |
221 | 6: movl $0, (%esp) | 208 | 6: movl $0, (%esp) |
222 | jmp 3b | 209 | jmp 3b |
223 | .popsection | 210 | .popsection |
224 | _ASM_EXTABLE(1b,4b) | 211 | _ASM_EXTABLE(1b, 4b) |
225 | _ASM_EXTABLE(2b,5b) | 212 | _ASM_EXTABLE(2b, 5b) |
226 | _ASM_EXTABLE(3b,6b) | 213 | _ASM_EXTABLE(3b, 6b) |
227 | POP_GS_EX | 214 | POP_GS_EX |
228 | .endm | 215 | .endm |
229 | 216 | ||
230 | ENTRY(ret_from_fork) | 217 | ENTRY(ret_from_fork) |
231 | pushl %eax | 218 | pushl %eax |
232 | call schedule_tail | 219 | call schedule_tail |
233 | GET_THREAD_INFO(%ebp) | 220 | GET_THREAD_INFO(%ebp) |
234 | popl %eax | 221 | popl %eax |
235 | pushl $0x0202 # Reset kernel eflags | 222 | pushl $0x0202 # Reset kernel eflags |
236 | popfl | 223 | popfl |
237 | jmp syscall_exit | 224 | jmp syscall_exit |
238 | END(ret_from_fork) | 225 | END(ret_from_fork) |
239 | 226 | ||
240 | ENTRY(ret_from_kernel_thread) | 227 | ENTRY(ret_from_kernel_thread) |
241 | pushl %eax | 228 | pushl %eax |
242 | call schedule_tail | 229 | call schedule_tail |
243 | GET_THREAD_INFO(%ebp) | 230 | GET_THREAD_INFO(%ebp) |
244 | popl %eax | 231 | popl %eax |
245 | pushl $0x0202 # Reset kernel eflags | 232 | pushl $0x0202 # Reset kernel eflags |
246 | popfl | 233 | popfl |
247 | movl PT_EBP(%esp),%eax | 234 | movl PT_EBP(%esp), %eax |
248 | call *PT_EBX(%esp) | 235 | call *PT_EBX(%esp) |
249 | movl $0,PT_EAX(%esp) | 236 | movl $0, PT_EAX(%esp) |
250 | jmp syscall_exit | 237 | jmp syscall_exit |
251 | ENDPROC(ret_from_kernel_thread) | 238 | ENDPROC(ret_from_kernel_thread) |
252 | 239 | ||
253 | /* | 240 | /* |
@@ -264,62 +251,65 @@ ret_from_exception:
264 | ret_from_intr: | 251 | ret_from_intr: |
265 | GET_THREAD_INFO(%ebp) | 252 | GET_THREAD_INFO(%ebp) |
266 | #ifdef CONFIG_VM86 | 253 | #ifdef CONFIG_VM86 |
267 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS | 254 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS |
268 | movb PT_CS(%esp), %al | 255 | movb PT_CS(%esp), %al |
269 | andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax | 256 | andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax |
270 | #else | 257 | #else |
271 | /* | 258 | /* |
272 | * We can be coming here from child spawned by kernel_thread(). | 259 | * We can be coming here from child spawned by kernel_thread(). |
273 | */ | 260 | */ |
274 | movl PT_CS(%esp), %eax | 261 | movl PT_CS(%esp), %eax |
275 | andl $SEGMENT_RPL_MASK, %eax | 262 | andl $SEGMENT_RPL_MASK, %eax |
276 | #endif | 263 | #endif |
277 | cmpl $USER_RPL, %eax | 264 | cmpl $USER_RPL, %eax |
278 | jb resume_kernel # not returning to v8086 or userspace | 265 | jb resume_kernel # not returning to v8086 or userspace |
279 | 266 | ||
280 | ENTRY(resume_userspace) | 267 | ENTRY(resume_userspace) |
281 | LOCKDEP_SYS_EXIT | 268 | LOCKDEP_SYS_EXIT |
282 | DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt | 269 | DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt |
283 | # setting need_resched or sigpending | 270 | # setting need_resched or sigpending |
284 | # between sampling and the iret | 271 | # between sampling and the iret |
285 | TRACE_IRQS_OFF | 272 | TRACE_IRQS_OFF |
286 | movl TI_flags(%ebp), %ecx | 273 | movl TI_flags(%ebp), %ecx |
287 | andl $_TIF_WORK_MASK, %ecx # is there any work to be done on | 274 | andl $_TIF_WORK_MASK, %ecx # is there any work to be done on |
288 | # int/exception return? | 275 | # int/exception return? |
289 | jne work_pending | 276 | jne work_pending |
290 | jmp restore_all | 277 | jmp restore_all |
291 | END(ret_from_exception) | 278 | END(ret_from_exception) |
292 | 279 | ||
293 | #ifdef CONFIG_PREEMPT | 280 | #ifdef CONFIG_PREEMPT |
294 | ENTRY(resume_kernel) | 281 | ENTRY(resume_kernel) |
295 | DISABLE_INTERRUPTS(CLBR_ANY) | 282 | DISABLE_INTERRUPTS(CLBR_ANY) |
296 | need_resched: | 283 | need_resched: |
297 | cmpl $0,PER_CPU_VAR(__preempt_count) | 284 | cmpl $0, PER_CPU_VAR(__preempt_count) |
298 | jnz restore_all | 285 | jnz restore_all |
299 | testl $X86_EFLAGS_IF,PT_EFLAGS(%esp) # interrupts off (exception path) ? | 286 | testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? |
300 | jz restore_all | 287 | jz restore_all |
301 | call preempt_schedule_irq | 288 | call preempt_schedule_irq |
302 | jmp need_resched | 289 | jmp need_resched |
303 | END(resume_kernel) | 290 | END(resume_kernel) |
304 | #endif | 291 | #endif |
305 | 292 | ||
306 | /* SYSENTER_RETURN points to after the "sysenter" instruction in | 293 | /* |
307 | the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */ | 294 | * SYSENTER_RETURN points to after the SYSENTER instruction |
295 | * in the vsyscall page. See vsyscall-sysentry.S, which defines | ||
296 | * the symbol. | ||
297 | */ | ||
308 | 298 | ||
309 | # sysenter call handler stub | 299 | # SYSENTER call handler stub |
310 | ENTRY(entry_SYSENTER_32) | 300 | ENTRY(entry_SYSENTER_32) |
311 | movl TSS_sysenter_sp0(%esp),%esp | 301 | movl TSS_sysenter_sp0(%esp), %esp |
312 | sysenter_past_esp: | 302 | sysenter_past_esp: |
313 | /* | 303 | /* |
314 | * Interrupts are disabled here, but we can't trace it until | 304 | * Interrupts are disabled here, but we can't trace it until |
315 | * enough kernel state to call TRACE_IRQS_OFF can be called - but | 305 | * enough kernel state to call TRACE_IRQS_OFF can be called - but |
316 | * we immediately enable interrupts at that point anyway. | 306 | * we immediately enable interrupts at that point anyway. |
317 | */ | 307 | */ |
318 | pushl $__USER_DS | 308 | pushl $__USER_DS |
319 | pushl %ebp | 309 | pushl %ebp |
320 | pushfl | 310 | pushfl |
321 | orl $X86_EFLAGS_IF, (%esp) | 311 | orl $X86_EFLAGS_IF, (%esp) |
322 | pushl $__USER_CS | 312 | pushl $__USER_CS |
323 | /* | 313 | /* |
324 | * Push current_thread_info()->sysenter_return to the stack. | 314 | * Push current_thread_info()->sysenter_return to the stack. |
325 | * A tiny bit of offset fixup is necessary: TI_sysenter_return | 315 | * A tiny bit of offset fixup is necessary: TI_sysenter_return |
@@ -328,9 +318,9 @@ sysenter_past_esp:
328 | * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack; | 318 | * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack; |
329 | * and THREAD_SIZE takes us to the bottom. | 319 | * and THREAD_SIZE takes us to the bottom. |
330 | */ | 320 | */ |
331 | pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp) | 321 | pushl ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp) |
332 | 322 | ||
333 | pushl %eax | 323 | pushl %eax |
334 | SAVE_ALL | 324 | SAVE_ALL |
335 | ENABLE_INTERRUPTS(CLBR_NONE) | 325 | ENABLE_INTERRUPTS(CLBR_NONE) |
336 | 326 | ||
@@ -338,132 +328,134 @@ sysenter_past_esp:
338 | * Load the potential sixth argument from user stack. | 328 | * Load the potential sixth argument from user stack. |
339 | * Careful about security. | 329 | * Careful about security. |
340 | */ | 330 | */ |
341 | cmpl $__PAGE_OFFSET-3,%ebp | 331 | cmpl $__PAGE_OFFSET-3, %ebp |
342 | jae syscall_fault | 332 | jae syscall_fault |
343 | ASM_STAC | 333 | ASM_STAC |
344 | 1: movl (%ebp),%ebp | 334 | 1: movl (%ebp), %ebp |
345 | ASM_CLAC | 335 | ASM_CLAC |
346 | movl %ebp,PT_EBP(%esp) | 336 | movl %ebp, PT_EBP(%esp) |
347 | _ASM_EXTABLE(1b,syscall_fault) | 337 | _ASM_EXTABLE(1b, syscall_fault) |
348 | 338 | ||
349 | GET_THREAD_INFO(%ebp) | 339 | GET_THREAD_INFO(%ebp) |
350 | 340 | ||
351 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) | 341 | testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp) |
352 | jnz sysenter_audit | 342 | jnz sysenter_audit |
353 | sysenter_do_call: | 343 | sysenter_do_call: |
354 | cmpl $(NR_syscalls), %eax | 344 | cmpl $(NR_syscalls), %eax |
355 | jae sysenter_badsys | 345 | jae sysenter_badsys |
356 | call *sys_call_table(,%eax,4) | 346 | call *sys_call_table(, %eax, 4) |
357 | sysenter_after_call: | 347 | sysenter_after_call: |
358 | movl %eax,PT_EAX(%esp) | 348 | movl %eax, PT_EAX(%esp) |
359 | LOCKDEP_SYS_EXIT | 349 | LOCKDEP_SYS_EXIT |
360 | DISABLE_INTERRUPTS(CLBR_ANY) | 350 | DISABLE_INTERRUPTS(CLBR_ANY) |
361 | TRACE_IRQS_OFF | 351 | TRACE_IRQS_OFF |
362 | movl TI_flags(%ebp), %ecx | 352 | movl TI_flags(%ebp), %ecx |
363 | testl $_TIF_ALLWORK_MASK, %ecx | 353 | testl $_TIF_ALLWORK_MASK, %ecx |
364 | jnz sysexit_audit | 354 | jnz sysexit_audit |
365 | sysenter_exit: | 355 | sysenter_exit: |
366 | /* if something modifies registers it must also disable sysexit */ | 356 | /* if something modifies registers it must also disable sysexit */ |
367 | movl PT_EIP(%esp), %edx | 357 | movl PT_EIP(%esp), %edx |
368 | movl PT_OLDESP(%esp), %ecx | 358 | movl PT_OLDESP(%esp), %ecx |
369 | xorl %ebp,%ebp | 359 | xorl %ebp, %ebp |
370 | TRACE_IRQS_ON | 360 | TRACE_IRQS_ON |
371 | 1: mov PT_FS(%esp), %fs | 361 | 1: mov PT_FS(%esp), %fs |
372 | PTGS_TO_GS | 362 | PTGS_TO_GS |
373 | ENABLE_INTERRUPTS_SYSEXIT | 363 | ENABLE_INTERRUPTS_SYSEXIT |
374 | 364 | ||
375 | #ifdef CONFIG_AUDITSYSCALL | 365 | #ifdef CONFIG_AUDITSYSCALL |
376 | sysenter_audit: | 366 | sysenter_audit: |
377 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp) | 367 | testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp) |
378 | jnz syscall_trace_entry | 368 | jnz syscall_trace_entry |
379 | /* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */ | 369 | /* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */ |
380 | movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */ | 370 | movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */ |
381 | /* movl PT_ECX(%esp), %ecx already set, a1: 3nd arg to audit */ | 371 | /* movl PT_ECX(%esp), %ecx already set, a1: 3nd arg to audit */ |
382 | pushl PT_ESI(%esp) /* a3: 5th arg */ | 372 | pushl PT_ESI(%esp) /* a3: 5th arg */ |
383 | pushl PT_EDX+4(%esp) /* a2: 4th arg */ | 373 | pushl PT_EDX+4(%esp) /* a2: 4th arg */ |
384 | call __audit_syscall_entry | 374 | call __audit_syscall_entry |
385 | popl %ecx /* get that remapped edx off the stack */ | 375 | popl %ecx /* get that remapped edx off the stack */ |
386 | popl %ecx /* get that remapped esi off the stack */ | 376 | popl %ecx /* get that remapped esi off the stack */ |
387 | movl PT_EAX(%esp),%eax /* reload syscall number */ | 377 | movl PT_EAX(%esp), %eax /* reload syscall number */ |
388 | jmp sysenter_do_call | 378 | jmp sysenter_do_call |
389 | 379 | ||
390 | sysexit_audit: | 380 | sysexit_audit: |
391 | testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx | 381 | testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx |
392 | jnz syscall_exit_work | 382 | jnz syscall_exit_work |
393 | TRACE_IRQS_ON | 383 | TRACE_IRQS_ON |
394 | ENABLE_INTERRUPTS(CLBR_ANY) | 384 | ENABLE_INTERRUPTS(CLBR_ANY) |
395 | movl %eax,%edx /* second arg, syscall return value */ | 385 | movl %eax, %edx /* second arg, syscall return value */ |
396 | cmpl $-MAX_ERRNO,%eax /* is it an error ? */ | 386 | cmpl $-MAX_ERRNO, %eax /* is it an error ? */ |
397 | setbe %al /* 1 if so, 0 if not */ | 387 | setbe %al /* 1 if so, 0 if not */ |
398 | movzbl %al,%eax /* zero-extend that */ | 388 | movzbl %al, %eax /* zero-extend that */ |
399 | call __audit_syscall_exit | 389 | call __audit_syscall_exit |
400 | DISABLE_INTERRUPTS(CLBR_ANY) | 390 | DISABLE_INTERRUPTS(CLBR_ANY) |
401 | TRACE_IRQS_OFF | 391 | TRACE_IRQS_OFF |
402 | movl TI_flags(%ebp), %ecx | 392 | movl TI_flags(%ebp), %ecx |
403 | testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx | 393 | testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx |
404 | jnz syscall_exit_work | 394 | jnz syscall_exit_work |
405 | movl PT_EAX(%esp),%eax /* reload syscall return value */ | 395 | movl PT_EAX(%esp), %eax /* reload syscall return value */ |
406 | jmp sysenter_exit | 396 | jmp sysenter_exit |
407 | #endif | 397 | #endif |
408 | 398 | ||
409 | .pushsection .fixup,"ax" | 399 | .pushsection .fixup, "ax" |
410 | 2: movl $0,PT_FS(%esp) | 400 | 2: movl $0, PT_FS(%esp) |
411 | jmp 1b | 401 | jmp 1b |
412 | .popsection | 402 | .popsection |
413 | _ASM_EXTABLE(1b,2b) | 403 | _ASM_EXTABLE(1b, 2b) |
414 | PTGS_TO_GS_EX | 404 | PTGS_TO_GS_EX |
415 | ENDPROC(entry_SYSENTER_32) | 405 | ENDPROC(entry_SYSENTER_32) |
416 | 406 | ||
417 | # system call handler stub | 407 | # system call handler stub |
418 | ENTRY(entry_INT80_32) | 408 | ENTRY(entry_INT80_32) |
419 | ASM_CLAC | 409 | ASM_CLAC |
420 | pushl %eax # save orig_eax | 410 | pushl %eax # save orig_eax |
421 | SAVE_ALL | 411 | SAVE_ALL |
422 | GET_THREAD_INFO(%ebp) | 412 | GET_THREAD_INFO(%ebp) |
423 | # system call tracing in operation / emulation | 413 | # system call tracing in operation / emulation |
424 | testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp) | 414 | testl $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp) |
425 | jnz syscall_trace_entry | 415 | jnz syscall_trace_entry |
426 | cmpl $(NR_syscalls), %eax | 416 | cmpl $(NR_syscalls), %eax |
427 | jae syscall_badsys | 417 | jae syscall_badsys |
428 | syscall_call: | 418 | syscall_call: |
429 | call *sys_call_table(,%eax,4) | 419 | call *sys_call_table(, %eax, 4) |
430 | syscall_after_call: | 420 | syscall_after_call: |
431 | movl %eax,PT_EAX(%esp) # store the return value | 421 | movl %eax, PT_EAX(%esp) # store the return value |
432 | syscall_exit: | 422 | syscall_exit: |
433 | LOCKDEP_SYS_EXIT | 423 | LOCKDEP_SYS_EXIT |
434 | DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt | 424 | DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt |
435 | # setting need_resched or sigpending | 425 | # setting need_resched or sigpending |
436 | # between sampling and the iret | 426 | # between sampling and the iret |
437 | TRACE_IRQS_OFF | 427 | TRACE_IRQS_OFF |
438 | movl TI_flags(%ebp), %ecx | 428 | movl TI_flags(%ebp), %ecx |
439 | testl $_TIF_ALLWORK_MASK, %ecx # current->work | 429 | testl $_TIF_ALLWORK_MASK, %ecx # current->work |
440 | jnz syscall_exit_work | 430 | jnz syscall_exit_work |
441 | 431 | ||
442 | restore_all: | 432 | restore_all: |
443 | TRACE_IRQS_IRET | 433 | TRACE_IRQS_IRET |
444 | restore_all_notrace: | 434 | restore_all_notrace: |
445 | #ifdef CONFIG_X86_ESPFIX32 | 435 | #ifdef CONFIG_X86_ESPFIX32 |
446 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS | 436 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS |
447 | # Warning: PT_OLDSS(%esp) contains the wrong/random values if we | 437 | /* |
448 | # are returning to the kernel. | 438 | * Warning: PT_OLDSS(%esp) contains the wrong/random values if we |
449 | # See comments in process.c:copy_thread() for details. | 439 | * are returning to the kernel. |
450 | movb PT_OLDSS(%esp), %ah | 440 | * See comments in process.c:copy_thread() for details. |
451 | movb PT_CS(%esp), %al | 441 | */ |
452 | andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax | 442 | movb PT_OLDSS(%esp), %ah |
453 | cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax | 443 | movb PT_CS(%esp), %al |
454 | je ldt_ss # returning to user-space with LDT SS | 444 | andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax |
445 | cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax | ||
446 | je ldt_ss # returning to user-space with LDT SS | ||
455 | #endif | 447 | #endif |
456 | restore_nocheck: | 448 | restore_nocheck: |
457 | RESTORE_REGS 4 # skip orig_eax/error_code | 449 | RESTORE_REGS 4 # skip orig_eax/error_code |
458 | irq_return: | 450 | irq_return: |
459 | INTERRUPT_RETURN | 451 | INTERRUPT_RETURN |
460 | .section .fixup,"ax" | 452 | .section .fixup, "ax" |
461 | ENTRY(iret_exc) | 453 | ENTRY(iret_exc ) |
462 | pushl $0 # no error code | 454 | pushl $0 # no error code |
463 | pushl $do_iret_error | 455 | pushl $do_iret_error |
464 | jmp error_code | 456 | jmp error_code |
465 | .previous | 457 | .previous |
466 | _ASM_EXTABLE(irq_return,iret_exc) | 458 | _ASM_EXTABLE(irq_return, iret_exc) |
467 | 459 | ||
468 | #ifdef CONFIG_X86_ESPFIX32 | 460 | #ifdef CONFIG_X86_ESPFIX32 |
469 | ldt_ss: | 461 | ldt_ss: |
@@ -476,8 +468,8 @@ ldt_ss:
476 | * is still available to implement the setting of the high | 468 | * is still available to implement the setting of the high |
477 | * 16-bits in the INTERRUPT_RETURN paravirt-op. | 469 | * 16-bits in the INTERRUPT_RETURN paravirt-op. |
478 | */ | 470 | */ |
479 | cmpl $0, pv_info+PARAVIRT_enabled | 471 | cmpl $0, pv_info+PARAVIRT_enabled |
480 | jne restore_nocheck | 472 | jne restore_nocheck |
481 | #endif | 473 | #endif |
482 | 474 | ||
483 | /* | 475 | /* |
@@ -492,21 +484,23 @@ ldt_ss:
492 | * a base address that matches for the difference. | 484 | * a base address that matches for the difference. |
493 | */ | 485 | */ |
494 | #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) | 486 | #define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8) |
495 | mov %esp, %edx /* load kernel esp */ | 487 | mov %esp, %edx /* load kernel esp */ |
496 | mov PT_OLDESP(%esp), %eax /* load userspace esp */ | 488 | mov PT_OLDESP(%esp), %eax /* load userspace esp */ |
497 | mov %dx, %ax /* eax: new kernel esp */ | 489 | mov %dx, %ax /* eax: new kernel esp */ |
498 | sub %eax, %edx /* offset (low word is 0) */ | 490 | sub %eax, %edx /* offset (low word is 0) */ |
499 | shr $16, %edx | 491 | shr $16, %edx |
500 | mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ | 492 | mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */ |
501 | mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ | 493 | mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */ |
502 | pushl $__ESPFIX_SS | 494 | pushl $__ESPFIX_SS |
503 | pushl %eax /* new kernel esp */ | 495 | pushl %eax /* new kernel esp */ |
504 | /* Disable interrupts, but do not irqtrace this section: we | 496 | /* |
497 | * Disable interrupts, but do not irqtrace this section: we | ||
505 | * will soon execute iret and the tracer was already set to | 498 | * will soon execute iret and the tracer was already set to |
506 | * the irqstate after the iret */ | 499 | * the irqstate after the IRET: |
500 | */ | ||
507 | DISABLE_INTERRUPTS(CLBR_EAX) | 501 | DISABLE_INTERRUPTS(CLBR_EAX) |
508 | lss (%esp), %esp /* switch to espfix segment */ | 502 | lss (%esp), %esp /* switch to espfix segment */ |
509 | jmp restore_nocheck | 503 | jmp restore_nocheck |
510 | #endif | 504 | #endif |
511 | ENDPROC(entry_INT80_32) | 505 | ENDPROC(entry_INT80_32) |
512 | 506 | ||
@@ -514,93 +508,93 @@ ENDPROC(entry_INT80_32)
514 | ALIGN | 508 | ALIGN |
515 | work_pending: | 509 | work_pending: |
516 | testb $_TIF_NEED_RESCHED, %cl | 510 | testb $_TIF_NEED_RESCHED, %cl |
517 | jz work_notifysig | 511 | jz work_notifysig |
518 | work_resched: | 512 | work_resched: |
519 | call schedule | 513 | call schedule |
520 | LOCKDEP_SYS_EXIT | 514 | LOCKDEP_SYS_EXIT |
521 | DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt | 515 | DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt |
522 | # setting need_resched or sigpending | 516 | # setting need_resched or sigpending |
523 | # between sampling and the iret | 517 | # between sampling and the iret |
524 | TRACE_IRQS_OFF | 518 | TRACE_IRQS_OFF |
525 | movl TI_flags(%ebp), %ecx | 519 | movl TI_flags(%ebp), %ecx |
526 | andl $_TIF_WORK_MASK, %ecx # is there any work to be done other | 520 | andl $_TIF_WORK_MASK, %ecx # is there any work to be done other |
527 | # than syscall tracing? | 521 | # than syscall tracing? |
528 | jz restore_all | 522 | jz restore_all |
529 | testb $_TIF_NEED_RESCHED, %cl | 523 | testb $_TIF_NEED_RESCHED, %cl |
530 | jnz work_resched | 524 | jnz work_resched |
531 | 525 | ||
532 | work_notifysig: # deal with pending signals and | 526 | work_notifysig: # deal with pending signals and |
533 | # notify-resume requests | 527 | # notify-resume requests |
534 | #ifdef CONFIG_VM86 | 528 | #ifdef CONFIG_VM86 |
535 | testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) | 529 | testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) |
536 | movl %esp, %eax | 530 | movl %esp, %eax |
537 | jnz work_notifysig_v86 # returning to kernel-space or | 531 | jnz work_notifysig_v86 # returning to kernel-space or |
538 | # vm86-space | 532 | # vm86-space |
539 | 1: | 533 | 1: |
540 | #else | 534 | #else |
541 | movl %esp, %eax | 535 | movl %esp, %eax |
542 | #endif | 536 | #endif |
543 | TRACE_IRQS_ON | 537 | TRACE_IRQS_ON |
544 | ENABLE_INTERRUPTS(CLBR_NONE) | 538 | ENABLE_INTERRUPTS(CLBR_NONE) |
545 | movb PT_CS(%esp), %bl | 539 | movb PT_CS(%esp), %bl |
546 | andb $SEGMENT_RPL_MASK, %bl | 540 | andb $SEGMENT_RPL_MASK, %bl |
547 | cmpb $USER_RPL, %bl | 541 | cmpb $USER_RPL, %bl |
548 | jb resume_kernel | 542 | jb resume_kernel |
549 | xorl %edx, %edx | 543 | xorl %edx, %edx |
550 | call do_notify_resume | 544 | call do_notify_resume |
551 | jmp resume_userspace | 545 | jmp resume_userspace |
552 | 546 | ||
553 | #ifdef CONFIG_VM86 | 547 | #ifdef CONFIG_VM86 |
554 | ALIGN | 548 | ALIGN |
555 | work_notifysig_v86: | 549 | work_notifysig_v86: |
556 | pushl %ecx # save ti_flags for do_notify_resume | 550 | pushl %ecx # save ti_flags for do_notify_resume |
557 | call save_v86_state # %eax contains pt_regs pointer | 551 | call save_v86_state # %eax contains pt_regs pointer |
558 | popl %ecx | 552 | popl %ecx |
559 | movl %eax, %esp | 553 | movl %eax, %esp |
560 | jmp 1b | 554 | jmp 1b |
561 | #endif | 555 | #endif |
562 | END(work_pending) | 556 | END(work_pending) |
563 | 557 | ||
564 | # perform syscall exit tracing | 558 | # perform syscall exit tracing |
565 | ALIGN | 559 | ALIGN |
566 | syscall_trace_entry: | 560 | syscall_trace_entry: |
567 | movl $-ENOSYS,PT_EAX(%esp) | 561 | movl $-ENOSYS, PT_EAX(%esp) |
568 | movl %esp, %eax | 562 | movl %esp, %eax |
569 | call syscall_trace_enter | 563 | call syscall_trace_enter |
570 | /* What it returned is what we'll actually use. */ | 564 | /* What it returned is what we'll actually use. */ |
571 | cmpl $(NR_syscalls), %eax | 565 | cmpl $(NR_syscalls), %eax |
572 | jnae syscall_call | 566 | jnae syscall_call |
573 | jmp syscall_exit | 567 | jmp syscall_exit |
574 | END(syscall_trace_entry) | 568 | END(syscall_trace_entry) |
575 | 569 | ||
576 | # perform syscall exit tracing | 570 | # perform syscall exit tracing |
577 | ALIGN | 571 | ALIGN |
578 | syscall_exit_work: | 572 | syscall_exit_work: |
579 | testl $_TIF_WORK_SYSCALL_EXIT, %ecx | 573 | testl $_TIF_WORK_SYSCALL_EXIT, %ecx |
580 | jz work_pending | 574 | jz work_pending |
581 | TRACE_IRQS_ON | 575 | TRACE_IRQS_ON |
582 | ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call | 576 | ENABLE_INTERRUPTS(CLBR_ANY) # could let syscall_trace_leave() call |
583 | # schedule() instead | 577 | # schedule() instead |
584 | movl %esp, %eax | 578 | movl %esp, %eax |
585 | call syscall_trace_leave | 579 | call syscall_trace_leave |
586 | jmp resume_userspace | 580 | jmp resume_userspace |
587 | END(syscall_exit_work) | 581 | END(syscall_exit_work) |
588 | 582 | ||
589 | syscall_fault: | 583 | syscall_fault: |
590 | ASM_CLAC | 584 | ASM_CLAC |
591 | GET_THREAD_INFO(%ebp) | 585 | GET_THREAD_INFO(%ebp) |
592 | movl $-EFAULT,PT_EAX(%esp) | 586 | movl $-EFAULT, PT_EAX(%esp) |
593 | jmp resume_userspace | 587 | jmp resume_userspace |
594 | END(syscall_fault) | 588 | END(syscall_fault) |
595 | 589 | ||
596 | syscall_badsys: | 590 | syscall_badsys: |
597 | movl $-ENOSYS,%eax | 591 | movl $-ENOSYS, %eax |
598 | jmp syscall_after_call | 592 | jmp syscall_after_call |
599 | END(syscall_badsys) | 593 | END(syscall_badsys) |
600 | 594 | ||
601 | sysenter_badsys: | 595 | sysenter_badsys: |
602 | movl $-ENOSYS,%eax | 596 | movl $-ENOSYS, %eax |
603 | jmp sysenter_after_call | 597 | jmp sysenter_after_call |
604 | END(sysenter_badsys) | 598 | END(sysenter_badsys) |
605 | 599 | ||
606 | .macro FIXUP_ESPFIX_STACK | 600 | .macro FIXUP_ESPFIX_STACK |
@@ -613,24 +607,24 @@ END(sysenter_badsys)
613 | */ | 607 | */ |
614 | #ifdef CONFIG_X86_ESPFIX32 | 608 | #ifdef CONFIG_X86_ESPFIX32 |
615 | /* fixup the stack */ | 609 | /* fixup the stack */ |
616 | mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ | 610 | mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */ |
617 | mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ | 611 | mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */ |
618 | shl $16, %eax | 612 | shl $16, %eax |
619 | addl %esp, %eax /* the adjusted stack pointer */ | 613 | addl %esp, %eax /* the adjusted stack pointer */ |
620 | pushl $__KERNEL_DS | 614 | pushl $__KERNEL_DS |
621 | pushl %eax | 615 | pushl %eax |
622 | lss (%esp), %esp /* switch to the normal stack segment */ | 616 | lss (%esp), %esp /* switch to the normal stack segment */ |
623 | #endif | 617 | #endif |
624 | .endm | 618 | .endm |
625 | .macro UNWIND_ESPFIX_STACK | 619 | .macro UNWIND_ESPFIX_STACK |
626 | #ifdef CONFIG_X86_ESPFIX32 | 620 | #ifdef CONFIG_X86_ESPFIX32 |
627 | movl %ss, %eax | 621 | movl %ss, %eax |
628 | /* see if on espfix stack */ | 622 | /* see if on espfix stack */ |
629 | cmpw $__ESPFIX_SS, %ax | 623 | cmpw $__ESPFIX_SS, %ax |
630 | jne 27f | 624 | jne 27f |
631 | movl $__KERNEL_DS, %eax | 625 | movl $__KERNEL_DS, %eax |
632 | movl %eax, %ds | 626 | movl %eax, %ds |
633 | movl %eax, %es | 627 | movl %eax, %es |
634 | /* switch to normal stack */ | 628 | /* switch to normal stack */ |
635 | FIXUP_ESPFIX_STACK | 629 | FIXUP_ESPFIX_STACK |
636 | 27: | 630 | 27: |
@@ -645,7 +639,7 @@ END(sysenter_badsys)
645 | ENTRY(irq_entries_start) | 639 | ENTRY(irq_entries_start) |
646 | vector=FIRST_EXTERNAL_VECTOR | 640 | vector=FIRST_EXTERNAL_VECTOR |
647 | .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) | 641 | .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR) |
648 | pushl $(~vector+0x80) /* Note: always in signed byte range */ | 642 | pushl $(~vector+0x80) /* Note: always in signed byte range */ |
649 | vector=vector+1 | 643 | vector=vector+1 |
650 | jmp common_interrupt | 644 | jmp common_interrupt |
651 | .align 8 | 645 | .align 8 |
@@ -659,35 +653,34 @@ END(irq_entries_start)
659 | .p2align CONFIG_X86_L1_CACHE_SHIFT | 653 | .p2align CONFIG_X86_L1_CACHE_SHIFT |
660 | common_interrupt: | 654 | common_interrupt: |
661 | ASM_CLAC | 655 | ASM_CLAC |
662 | addl $-0x80,(%esp) /* Adjust vector into the [-256,-1] range */ | 656 | addl $-0x80, (%esp) /* Adjust vector into the [-256, -1] range */ |
663 | SAVE_ALL | 657 | SAVE_ALL |
664 | TRACE_IRQS_OFF | 658 | TRACE_IRQS_OFF |
665 | movl %esp,%eax | 659 | movl %esp, %eax |
666 | call do_IRQ | 660 | call do_IRQ |
667 | jmp ret_from_intr | 661 | jmp ret_from_intr |
668 | ENDPROC(common_interrupt) | 662 | ENDPROC(common_interrupt) |
669 | 663 | ||
670 | #define BUILD_INTERRUPT3(name, nr, fn) \ | 664 | #define BUILD_INTERRUPT3(name, nr, fn) \ |
671 | ENTRY(name) \ | 665 | ENTRY(name) \ |
672 | ASM_CLAC; \ | 666 | ASM_CLAC; \ |
673 | pushl $~(nr); \ | 667 | pushl $~(nr); \ |
674 | SAVE_ALL; \ | 668 | SAVE_ALL; \ |
675 | TRACE_IRQS_OFF \ | 669 | TRACE_IRQS_OFF \ |
676 | movl %esp,%eax; \ | 670 | movl %esp, %eax; \ |
677 | call fn; \ | 671 | call fn; \ |
678 | jmp ret_from_intr; \ | 672 | jmp ret_from_intr; \ |
679 | ENDPROC(name) | 673 | ENDPROC(name) |
680 | 674 | ||
681 | 675 | ||
682 | #ifdef CONFIG_TRACING | 676 | #ifdef CONFIG_TRACING |
683 | #define TRACE_BUILD_INTERRUPT(name, nr) \ | 677 | # define TRACE_BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name) |
684 | BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name) | ||
685 | #else | 678 | #else |
686 | #define TRACE_BUILD_INTERRUPT(name, nr) | 679 | # define TRACE_BUILD_INTERRUPT(name, nr) |
687 | #endif | 680 | #endif |
688 | 681 | ||
689 | #define BUILD_INTERRUPT(name, nr) \ | 682 | #define BUILD_INTERRUPT(name, nr) \ |
690 | BUILD_INTERRUPT3(name, nr, smp_##name); \ | 683 | BUILD_INTERRUPT3(name, nr, smp_##name); \ |
691 | TRACE_BUILD_INTERRUPT(name, nr) | 684 | TRACE_BUILD_INTERRUPT(name, nr) |
692 | 685 | ||
693 | /* The include is where all of the SMP etc. interrupts come from */ | 686 | /* The include is where all of the SMP etc. interrupts come from */ |
@@ -695,30 +688,30 @@ ENDPROC(name)
695 | 688 | ||
696 | ENTRY(coprocessor_error) | 689 | ENTRY(coprocessor_error) |
697 | ASM_CLAC | 690 | ASM_CLAC |
698 | pushl $0 | 691 | pushl $0 |
699 | pushl $do_coprocessor_error | 692 | pushl $do_coprocessor_error |
700 | jmp error_code | 693 | jmp error_code |
701 | END(coprocessor_error) | 694 | END(coprocessor_error) |
702 | 695 | ||
703 | ENTRY(simd_coprocessor_error) | 696 | ENTRY(simd_coprocessor_error) |
704 | ASM_CLAC | 697 | ASM_CLAC |
705 | pushl $0 | 698 | pushl $0 |
706 | #ifdef CONFIG_X86_INVD_BUG | 699 | #ifdef CONFIG_X86_INVD_BUG |
707 | /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ | 700 | /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */ |
708 | ALTERNATIVE "pushl $do_general_protection", \ | 701 | ALTERNATIVE "pushl $do_general_protection", \ |
709 | "pushl $do_simd_coprocessor_error", \ | 702 | "pushl $do_simd_coprocessor_error", \ |
710 | X86_FEATURE_XMM | 703 | X86_FEATURE_XMM |
711 | #else | 704 | #else |
712 | pushl $do_simd_coprocessor_error | 705 | pushl $do_simd_coprocessor_error |
713 | #endif | 706 | #endif |
714 | jmp error_code | 707 | jmp error_code |
715 | END(simd_coprocessor_error) | 708 | END(simd_coprocessor_error) |
716 | 709 | ||
717 | ENTRY(device_not_available) | 710 | ENTRY(device_not_available) |
718 | ASM_CLAC | 711 | ASM_CLAC |
719 | pushl $-1 # mark this as an int | 712 | pushl $-1 # mark this as an int |
720 | pushl $do_device_not_available | 713 | pushl $do_device_not_available |
721 | jmp error_code | 714 | jmp error_code |
722 | END(device_not_available) | 715 | END(device_not_available) |
723 | 716 | ||
724 | #ifdef CONFIG_PARAVIRT | 717 | #ifdef CONFIG_PARAVIRT |
@@ -735,165 +728,171 @@ END(native_irq_enable_sysexit)
735 | 728 | ||
736 | ENTRY(overflow) | 729 | ENTRY(overflow) |
737 | ASM_CLAC | 730 | ASM_CLAC |
738 | pushl $0 | 731 | pushl $0 |
739 | pushl $do_overflow | 732 | pushl $do_overflow |
740 | jmp error_code | 733 | jmp error_code |
741 | END(overflow) | 734 | END(overflow) |
742 | 735 | ||
743 | ENTRY(bounds) | 736 | ENTRY(bounds) |
744 | ASM_CLAC | 737 | ASM_CLAC |
745 | pushl $0 | 738 | pushl $0 |
746 | pushl $do_bounds | 739 | pushl $do_bounds |
747 | jmp error_code | 740 | jmp error_code |
748 | END(bounds) | 741 | END(bounds) |
749 | 742 | ||
750 | ENTRY(invalid_op) | 743 | ENTRY(invalid_op) |
751 | ASM_CLAC | 744 | ASM_CLAC |
752 | pushl $0 | 745 | pushl $0 |
753 | pushl $do_invalid_op | 746 | pushl $do_invalid_op |
754 | jmp error_code | 747 | jmp error_code |
755 | END(invalid_op) | 748 | END(invalid_op) |
756 | 749 | ||
757 | ENTRY(coprocessor_segment_overrun) | 750 | ENTRY(coprocessor_segment_overrun) |
758 | ASM_CLAC | 751 | ASM_CLAC |
759 | pushl $0 | 752 | pushl $0 |
760 | pushl $do_coprocessor_segment_overrun | 753 | pushl $do_coprocessor_segment_overrun |
761 | jmp error_code | 754 | jmp error_code |
762 | END(coprocessor_segment_overrun) | 755 | END(coprocessor_segment_overrun) |
763 | 756 | ||
764 | ENTRY(invalid_TSS) | 757 | ENTRY(invalid_TSS) |
765 | ASM_CLAC | 758 | ASM_CLAC |
766 | pushl $do_invalid_TSS | 759 | pushl $do_invalid_TSS |
767 | jmp error_code | 760 | jmp error_code |
768 | END(invalid_TSS) | 761 | END(invalid_TSS) |
769 | 762 | ||
770 | ENTRY(segment_not_present) | 763 | ENTRY(segment_not_present) |
771 | ASM_CLAC | 764 | ASM_CLAC |
772 | pushl $do_segment_not_present | 765 | pushl $do_segment_not_present |
773 | jmp error_code | 766 | jmp error_code |
774 | END(segment_not_present) | 767 | END(segment_not_present) |
775 | 768 | ||
776 | ENTRY(stack_segment) | 769 | ENTRY(stack_segment) |
777 | ASM_CLAC | 770 | ASM_CLAC |
778 | pushl $do_stack_segment | 771 | pushl $do_stack_segment |
779 | jmp error_code | 772 | jmp error_code |
780 | END(stack_segment) | 773 | END(stack_segment) |
781 | 774 | ||
782 | ENTRY(alignment_check) | 775 | ENTRY(alignment_check) |
783 | ASM_CLAC | 776 | ASM_CLAC |
784 | pushl $do_alignment_check | 777 | pushl $do_alignment_check |
785 | jmp error_code | 778 | jmp error_code |
786 | END(alignment_check) | 779 | END(alignment_check) |
787 | 780 | ||
788 | ENTRY(divide_error) | 781 | ENTRY(divide_error) |
789 | ASM_CLAC | 782 | ASM_CLAC |
790 | pushl $0 # no error code | 783 | pushl $0 # no error code |
791 | pushl $do_divide_error | 784 | pushl $do_divide_error |
792 | jmp error_code | 785 | jmp error_code |
793 | END(divide_error) | 786 | END(divide_error) |
794 | 787 | ||
795 | #ifdef CONFIG_X86_MCE | 788 | #ifdef CONFIG_X86_MCE |
796 | ENTRY(machine_check) | 789 | ENTRY(machine_check) |
797 | ASM_CLAC | 790 | ASM_CLAC |
798 | pushl $0 | 791 | pushl $0 |
799 | pushl machine_check_vector | 792 | pushl machine_check_vector |
800 | jmp error_code | 793 | jmp error_code |
801 | END(machine_check) | 794 | END(machine_check) |
802 | #endif | 795 | #endif |
803 | 796 | ||
804 | ENTRY(spurious_interrupt_bug) | 797 | ENTRY(spurious_interrupt_bug) |
805 | ASM_CLAC | 798 | ASM_CLAC |
806 | pushl $0 | 799 | pushl $0 |
807 | pushl $do_spurious_interrupt_bug | 800 | pushl $do_spurious_interrupt_bug |
808 | jmp error_code | 801 | jmp error_code |
809 | END(spurious_interrupt_bug) | 802 | END(spurious_interrupt_bug) |
810 | 803 | ||
811 | #ifdef CONFIG_XEN | 804 | #ifdef CONFIG_XEN |
812 | /* Xen doesn't set %esp to be precisely what the normal sysenter | 805 | /* |
813 | entrypoint expects, so fix it up before using the normal path. */ | 806 | * Xen doesn't set %esp to be precisely what the normal SYSENTER |
807 | * entry point expects, so fix it up before using the normal path. | ||
808 | */ | ||
814 | ENTRY(xen_sysenter_target) | 809 | ENTRY(xen_sysenter_target) |
815 | addl $5*4, %esp /* remove xen-provided frame */ | 810 | addl $5*4, %esp /* remove xen-provided frame */ |
816 | jmp sysenter_past_esp | 811 | jmp sysenter_past_esp |
817 | 812 | ||
818 | ENTRY(xen_hypervisor_callback) | 813 | ENTRY(xen_hypervisor_callback) |
819 | pushl $-1 /* orig_ax = -1 => not a system call */ | 814 | pushl $-1 /* orig_ax = -1 => not a system call */ |
820 | SAVE_ALL | 815 | SAVE_ALL |
821 | TRACE_IRQS_OFF | 816 | TRACE_IRQS_OFF |
822 | 817 | ||
823 | /* Check to see if we got the event in the critical | 818 | /* |
824 | region in xen_iret_direct, after we've reenabled | 819 | * Check to see if we got the event in the critical |
825 | events and checked for pending events. This simulates | 820 | * region in xen_iret_direct, after we've reenabled |
826 | iret instruction's behaviour where it delivers a | 821 | * events and checked for pending events. This simulates |
827 | pending interrupt when enabling interrupts. */ | 822 | * iret instruction's behaviour where it delivers a |
828 | movl PT_EIP(%esp),%eax | 823 | * pending interrupt when enabling interrupts: |
829 | cmpl $xen_iret_start_crit,%eax | 824 | */ |
830 | jb 1f | 825 | movl PT_EIP(%esp), %eax |
831 | cmpl $xen_iret_end_crit,%eax | 826 | cmpl $xen_iret_start_crit, %eax |
832 | jae 1f | 827 | jb 1f |
828 | cmpl $xen_iret_end_crit, %eax | ||
829 | jae 1f | ||
833 | 830 | ||
834 | jmp xen_iret_crit_fixup | 831 | jmp xen_iret_crit_fixup |
835 | 832 | ||
836 | ENTRY(xen_do_upcall) | 833 | ENTRY(xen_do_upcall) |
837 | 1: mov %esp, %eax | 834 | 1: mov %esp, %eax |
838 | call xen_evtchn_do_upcall | 835 | call xen_evtchn_do_upcall |
839 | #ifndef CONFIG_PREEMPT | 836 | #ifndef CONFIG_PREEMPT |
840 | call xen_maybe_preempt_hcall | 837 | call xen_maybe_preempt_hcall |
841 | #endif | 838 | #endif |
842 | jmp ret_from_intr | 839 | jmp ret_from_intr |
843 | ENDPROC(xen_hypervisor_callback) | 840 | ENDPROC(xen_hypervisor_callback) |
844 | 841 | ||
845 | # Hypervisor uses this for application faults while it executes. | 842 | /* |
846 | # We get here for two reasons: | 843 | * Hypervisor uses this for application faults while it executes. |
847 | # 1. Fault while reloading DS, ES, FS or GS | 844 | * We get here for two reasons: |
848 | # 2. Fault while executing IRET | 845 | * 1. Fault while reloading DS, ES, FS or GS |
849 | # Category 1 we fix up by reattempting the load, and zeroing the segment | 846 | * 2. Fault while executing IRET |
850 | # register if the load fails. | 847 | * Category 1 we fix up by reattempting the load, and zeroing the segment |
851 | # Category 2 we fix up by jumping to do_iret_error. We cannot use the | 848 | * register if the load fails. |
852 | # normal Linux return path in this case because if we use the IRET hypercall | 849 | * Category 2 we fix up by jumping to do_iret_error. We cannot use the |
853 | # to pop the stack frame we end up in an infinite loop of failsafe callbacks. | 850 | * normal Linux return path in this case because if we use the IRET hypercall |
854 | # We distinguish between categories by maintaining a status value in EAX. | 851 | * to pop the stack frame we end up in an infinite loop of failsafe callbacks. |
852 | * We distinguish between categories by maintaining a status value in EAX. | ||
853 | */ | ||
855 | ENTRY(xen_failsafe_callback) | 854 | ENTRY(xen_failsafe_callback) |
856 | pushl %eax | 855 | pushl %eax |
857 | movl $1,%eax | 856 | movl $1, %eax |
858 | 1: mov 4(%esp),%ds | 857 | 1: mov 4(%esp), %ds |
859 | 2: mov 8(%esp),%es | 858 | 2: mov 8(%esp), %es |
860 | 3: mov 12(%esp),%fs | 859 | 3: mov 12(%esp), %fs |
861 | 4: mov 16(%esp),%gs | 860 | 4: mov 16(%esp), %gs |
862 | /* EAX == 0 => Category 1 (Bad segment) | 861 | /* EAX == 0 => Category 1 (Bad segment) |
863 | EAX != 0 => Category 2 (Bad IRET) */ | 862 | EAX != 0 => Category 2 (Bad IRET) */ |
864 | testl %eax,%eax | 863 | testl %eax, %eax |
865 | popl %eax | 864 | popl %eax |
866 | lea 16(%esp),%esp | 865 | lea 16(%esp), %esp |
867 | jz 5f | 866 | jz 5f |
868 | jmp iret_exc | 867 | jmp iret_exc |
869 | 5: pushl $-1 /* orig_ax = -1 => not a system call */ | 868 | 5: pushl $-1 /* orig_ax = -1 => not a system call */ |
870 | SAVE_ALL | 869 | SAVE_ALL |
871 | jmp ret_from_exception | 870 | jmp ret_from_exception |
872 | 871 | ||
873 | .section .fixup,"ax" | 872 | .section .fixup, "ax" |
874 | 6: xorl %eax,%eax | 873 | 6: xorl %eax, %eax |
875 | movl %eax,4(%esp) | 874 | movl %eax, 4(%esp) |
876 | jmp 1b | 875 | jmp 1b |
877 | 7: xorl %eax,%eax | 876 | 7: xorl %eax, %eax |
878 | movl %eax,8(%esp) | 877 | movl %eax, 8(%esp) |
879 | jmp 2b | 878 | jmp 2b |
880 | 8: xorl %eax,%eax | 879 | 8: xorl %eax, %eax |
881 | movl %eax,12(%esp) | 880 | movl %eax, 12(%esp) |
882 | jmp 3b | 881 | jmp 3b |
883 | 9: xorl %eax,%eax | 882 | 9: xorl %eax, %eax |
884 | movl %eax,16(%esp) | 883 | movl %eax, 16(%esp) |
885 | jmp 4b | 884 | jmp 4b |
886 | .previous | 885 | .previous |
887 | _ASM_EXTABLE(1b,6b) | 886 | _ASM_EXTABLE(1b, 6b) |
888 | _ASM_EXTABLE(2b,7b) | 887 | _ASM_EXTABLE(2b, 7b) |
889 | _ASM_EXTABLE(3b,8b) | 888 | _ASM_EXTABLE(3b, 8b) |
890 | _ASM_EXTABLE(4b,9b) | 889 | _ASM_EXTABLE(4b, 9b) |
891 | ENDPROC(xen_failsafe_callback) | 890 | ENDPROC(xen_failsafe_callback) |
892 | 891 | ||
893 | BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR, | 892 | BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR, |
894 | xen_evtchn_do_upcall) | 893 | xen_evtchn_do_upcall) |
895 | 894 | ||
896 | #endif /* CONFIG_XEN */ | 895 | #endif /* CONFIG_XEN */ |
897 | 896 | ||
898 | #if IS_ENABLED(CONFIG_HYPERV) | 897 | #if IS_ENABLED(CONFIG_HYPERV) |
899 | 898 | ||
@@ -910,28 +909,28 @@ ENTRY(mcount)
910 | END(mcount) | 909 | END(mcount) |
911 | 910 | ||
912 | ENTRY(ftrace_caller) | 911 | ENTRY(ftrace_caller) |
913 | pushl %eax | 912 | pushl %eax |
914 | pushl %ecx | 913 | pushl %ecx |
915 | pushl %edx | 914 | pushl %edx |
916 | pushl $0 /* Pass NULL as regs pointer */ | 915 | pushl $0 /* Pass NULL as regs pointer */ |
917 | movl 4*4(%esp), %eax | 916 | movl 4*4(%esp), %eax |
918 | movl 0x4(%ebp), %edx | 917 | movl 0x4(%ebp), %edx |
919 | movl function_trace_op, %ecx | 918 | movl function_trace_op, %ecx |
920 | subl $MCOUNT_INSN_SIZE, %eax | 919 | subl $MCOUNT_INSN_SIZE, %eax |
921 | 920 | ||
922 | .globl ftrace_call | 921 | .globl ftrace_call |
923 | ftrace_call: | 922 | ftrace_call: |
924 | call ftrace_stub | 923 | call ftrace_stub |
925 | 924 | ||
926 | addl $4,%esp /* skip NULL pointer */ | 925 | addl $4, %esp /* skip NULL pointer */ |
927 | popl %edx | 926 | popl %edx |
928 | popl %ecx | 927 | popl %ecx |
929 | popl %eax | 928 | popl %eax |
930 | ftrace_ret: | 929 | ftrace_ret: |
931 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 930 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
932 | .globl ftrace_graph_call | 931 | .globl ftrace_graph_call |
933 | ftrace_graph_call: | 932 | ftrace_graph_call: |
934 | jmp ftrace_stub | 933 | jmp ftrace_stub |
935 | #endif | 934 | #endif |
936 | 935 | ||
937 | .globl ftrace_stub | 936 | .globl ftrace_stub |
@@ -949,72 +948,72 @@ ENTRY(ftrace_regs_caller)
949 | * as the current return ip is. We move the return ip into the | 948 | * as the current return ip is. We move the return ip into the |
950 | * ip location, and move flags into the return ip location. | 949 | * ip location, and move flags into the return ip location. |
951 | */ | 950 | */ |
952 | pushl 4(%esp) /* save return ip into ip slot */ | 951 | pushl 4(%esp) /* save return ip into ip slot */ |
953 | 952 | ||
954 | pushl $0 /* Load 0 into orig_ax */ | 953 | pushl $0 /* Load 0 into orig_ax */ |
955 | pushl %gs | 954 | pushl %gs |
956 | pushl %fs | 955 | pushl %fs |
957 | pushl %es | 956 | pushl %es |
958 | pushl %ds | 957 | pushl %ds |
959 | pushl %eax | 958 | pushl %eax |
960 | pushl %ebp | 959 | pushl %ebp |
961 | pushl %edi | 960 | pushl %edi |
962 | pushl %esi | 961 | pushl %esi |
963 | pushl %edx | 962 | pushl %edx |
964 | pushl %ecx | 963 | pushl %ecx |
965 | pushl %ebx | 964 | pushl %ebx |
966 | 965 | ||
967 | movl 13*4(%esp), %eax /* Get the saved flags */ | 966 | movl 13*4(%esp), %eax /* Get the saved flags */ |
968 | movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */ | 967 | movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */ |
969 | /* clobbering return ip */ | 968 | /* clobbering return ip */ |
970 | movl $__KERNEL_CS,13*4(%esp) | 969 | movl $__KERNEL_CS, 13*4(%esp) |
971 | 970 | ||
972 | movl 12*4(%esp), %eax /* Load ip (1st parameter) */ | 971 | movl 12*4(%esp), %eax /* Load ip (1st parameter) */ |
973 | subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ | 972 | subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ |
974 | movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ | 973 | movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ |
975 | movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ | 974 | movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ |
976 | pushl %esp /* Save pt_regs as 4th parameter */ | 975 | pushl %esp /* Save pt_regs as 4th parameter */ |
977 | 976 | ||
978 | GLOBAL(ftrace_regs_call) | 977 | GLOBAL(ftrace_regs_call) |
979 | call ftrace_stub | 978 | call ftrace_stub |
980 | 979 | ||
981 | addl $4, %esp /* Skip pt_regs */ | 980 | addl $4, %esp /* Skip pt_regs */ |
982 | movl 14*4(%esp), %eax /* Move flags back into cs */ | 981 | movl 14*4(%esp), %eax /* Move flags back into cs */ |
983 | movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */ | 982 | movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */ |
984 | movl 12*4(%esp), %eax /* Get return ip from regs->ip */ | 983 | movl 12*4(%esp), %eax /* Get return ip from regs->ip */ |
985 | movl %eax, 14*4(%esp) /* Put return ip back for ret */ | 984 | movl %eax, 14*4(%esp) /* Put return ip back for ret */ |
986 | 985 | ||
987 | popl %ebx | 986 | popl %ebx |
988 | popl %ecx | 987 | popl %ecx |
989 | popl %edx | 988 | popl %edx |
990 | popl %esi | 989 | popl %esi |
991 | popl %edi | 990 | popl %edi |
992 | popl %ebp | 991 | popl %ebp |
993 | popl %eax | 992 | popl %eax |
994 | popl %ds | 993 | popl %ds |
995 | popl %es | 994 | popl %es |
996 | popl %fs | 995 | popl %fs |
997 | popl %gs | 996 | popl %gs |
998 | addl $8, %esp /* Skip orig_ax and ip */ | 997 | addl $8, %esp /* Skip orig_ax and ip */ |
999 | popf /* Pop flags at end (no addl to corrupt flags) */ | 998 | popf /* Pop flags at end (no addl to corrupt flags) */ |
1000 | jmp ftrace_ret | 999 | jmp ftrace_ret |
1001 | 1000 | ||
1002 | popf | 1001 | popf |
1003 | jmp ftrace_stub | 1002 | jmp ftrace_stub |
1004 | #else /* ! CONFIG_DYNAMIC_FTRACE */ | 1003 | #else /* ! CONFIG_DYNAMIC_FTRACE */ |
1005 | 1004 | ||
1006 | ENTRY(mcount) | 1005 | ENTRY(mcount) |
1007 | cmpl $__PAGE_OFFSET, %esp | 1006 | cmpl $__PAGE_OFFSET, %esp |
1008 | jb ftrace_stub /* Paging not enabled yet? */ | 1007 | jb ftrace_stub /* Paging not enabled yet? */ |
1009 | 1008 | ||
1010 | cmpl $ftrace_stub, ftrace_trace_function | 1009 | cmpl $ftrace_stub, ftrace_trace_function |
1011 | jnz trace | 1010 | jnz trace |
1012 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1011 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1013 | cmpl $ftrace_stub, ftrace_graph_return | 1012 | cmpl $ftrace_stub, ftrace_graph_return |
1014 | jnz ftrace_graph_caller | 1013 | jnz ftrace_graph_caller |
1015 | 1014 | ||
1016 | cmpl $ftrace_graph_entry_stub, ftrace_graph_entry | 1015 | cmpl $ftrace_graph_entry_stub, ftrace_graph_entry |
1017 | jnz ftrace_graph_caller | 1016 | jnz ftrace_graph_caller |
1018 | #endif | 1017 | #endif |
1019 | .globl ftrace_stub | 1018 | .globl ftrace_stub |
1020 | ftrace_stub: | 1019 | ftrace_stub: |
@@ -1022,92 +1021,92 @@ ftrace_stub:
1022 | 1021 | ||
1023 | /* taken from glibc */ | 1022 | /* taken from glibc */ |
1024 | trace: | 1023 | trace: |
1025 | pushl %eax | 1024 | pushl %eax |
1026 | pushl %ecx | 1025 | pushl %ecx |
1027 | pushl %edx | 1026 | pushl %edx |
1028 | movl 0xc(%esp), %eax | 1027 | movl 0xc(%esp), %eax |
1029 | movl 0x4(%ebp), %edx | 1028 | movl 0x4(%ebp), %edx |
1030 | subl $MCOUNT_INSN_SIZE, %eax | 1029 | subl $MCOUNT_INSN_SIZE, %eax |
1031 | 1030 | ||
1032 | call *ftrace_trace_function | 1031 | call *ftrace_trace_function |
1033 | 1032 | ||
1034 | popl %edx | 1033 | popl %edx |
1035 | popl %ecx | 1034 | popl %ecx |
1036 | popl %eax | 1035 | popl %eax |
1037 | jmp ftrace_stub | 1036 | jmp ftrace_stub |
1038 | END(mcount) | 1037 | END(mcount) |
1039 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 1038 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
1040 | #endif /* CONFIG_FUNCTION_TRACER */ | 1039 | #endif /* CONFIG_FUNCTION_TRACER */ |
1041 | 1040 | ||
1042 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1041 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1043 | ENTRY(ftrace_graph_caller) | 1042 | ENTRY(ftrace_graph_caller) |
1044 | pushl %eax | 1043 | pushl %eax |
1045 | pushl %ecx | 1044 | pushl %ecx |
1046 | pushl %edx | 1045 | pushl %edx |
1047 | movl 0xc(%esp), %eax | 1046 | movl 0xc(%esp), %eax |
1048 | lea 0x4(%ebp), %edx | 1047 | lea 0x4(%ebp), %edx |
1049 | movl (%ebp), %ecx | 1048 | movl (%ebp), %ecx |
1050 | subl $MCOUNT_INSN_SIZE, %eax | 1049 | subl $MCOUNT_INSN_SIZE, %eax |
1051 | call prepare_ftrace_return | 1050 | call prepare_ftrace_return |
1052 | popl %edx | 1051 | popl %edx |
1053 | popl %ecx | 1052 | popl %ecx |
1054 | popl %eax | 1053 | popl %eax |
1055 | ret | 1054 | ret |
1056 | END(ftrace_graph_caller) | 1055 | END(ftrace_graph_caller) |
1057 | 1056 | ||
1058 | .globl return_to_handler | 1057 | .globl return_to_handler |
1059 | return_to_handler: | 1058 | return_to_handler: |
1060 | pushl %eax | 1059 | pushl %eax |
1061 | pushl %edx | 1060 | pushl %edx |
1062 | movl %ebp, %eax | 1061 | movl %ebp, %eax |
1063 | call ftrace_return_to_handler | 1062 | call ftrace_return_to_handler |
1064 | movl %eax, %ecx | 1063 | movl %eax, %ecx |
1065 | popl %edx | 1064 | popl %edx |
1066 | popl %eax | 1065 | popl %eax |
1067 | jmp *%ecx | 1066 | jmp *%ecx |
1068 | #endif | 1067 | #endif |
1069 | 1068 | ||
1070 | #ifdef CONFIG_TRACING | 1069 | #ifdef CONFIG_TRACING |
1071 | ENTRY(trace_page_fault) | 1070 | ENTRY(trace_page_fault) |
1072 | ASM_CLAC | 1071 | ASM_CLAC |
1073 | pushl $trace_do_page_fault | 1072 | pushl $trace_do_page_fault |
1074 | jmp error_code | 1073 | jmp error_code |
1075 | END(trace_page_fault) | 1074 | END(trace_page_fault) |
1076 | #endif | 1075 | #endif |
1077 | 1076 | ||
1078 | ENTRY(page_fault) | 1077 | ENTRY(page_fault) |
1079 | ASM_CLAC | 1078 | ASM_CLAC |
1080 | pushl $do_page_fault | 1079 | pushl $do_page_fault |
1081 | ALIGN | 1080 | ALIGN |
1082 | error_code: | 1081 | error_code: |
1083 | /* the function address is in %gs's slot on the stack */ | 1082 | /* the function address is in %gs's slot on the stack */ |
1084 | pushl %fs | 1083 | pushl %fs |
1085 | pushl %es | 1084 | pushl %es |
1086 | pushl %ds | 1085 | pushl %ds |
1087 | pushl %eax | 1086 | pushl %eax |
1088 | pushl %ebp | 1087 | pushl %ebp |
1089 | pushl %edi | 1088 | pushl %edi |
1090 | pushl %esi | 1089 | pushl %esi |
1091 | pushl %edx | 1090 | pushl %edx |
1092 | pushl %ecx | 1091 | pushl %ecx |
1093 | pushl %ebx | 1092 | pushl %ebx |
1094 | cld | 1093 | cld |
1095 | movl $(__KERNEL_PERCPU), %ecx | 1094 | movl $(__KERNEL_PERCPU), %ecx |
1096 | movl %ecx, %fs | 1095 | movl %ecx, %fs |
1097 | UNWIND_ESPFIX_STACK | 1096 | UNWIND_ESPFIX_STACK |
1098 | GS_TO_REG %ecx | 1097 | GS_TO_REG %ecx |
1099 | movl PT_GS(%esp), %edi # get the function address | 1098 | movl PT_GS(%esp), %edi # get the function address |
1100 | movl PT_ORIG_EAX(%esp), %edx # get the error code | 1099 | movl PT_ORIG_EAX(%esp), %edx # get the error code |
1101 | movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart | 1100 | movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart |
1102 | REG_TO_PTGS %ecx | 1101 | REG_TO_PTGS %ecx |
1103 | SET_KERNEL_GS %ecx | 1102 | SET_KERNEL_GS %ecx |
1104 | movl $(__USER_DS), %ecx | 1103 | movl $(__USER_DS), %ecx |
1105 | movl %ecx, %ds | 1104 | movl %ecx, %ds |
1106 | movl %ecx, %es | 1105 | movl %ecx, %es |
1107 | TRACE_IRQS_OFF | 1106 | TRACE_IRQS_OFF |
1108 | movl %esp,%eax # pt_regs pointer | 1107 | movl %esp, %eax # pt_regs pointer |
1109 | call *%edi | 1108 | call *%edi |
1110 | jmp ret_from_exception | 1109 | jmp ret_from_exception |
1111 | END(page_fault) | 1110 | END(page_fault) |
1112 | 1111 | ||
1113 | /* | 1112 | /* |
@@ -1124,28 +1123,28 @@ END(page_fault) | |||
1124 | * the instruction that would have done it for sysenter. | 1123 | * the instruction that would have done it for sysenter. |
1125 | */ | 1124 | */ |
1126 | .macro FIX_STACK offset ok label | 1125 | .macro FIX_STACK offset ok label |
1127 | cmpw $__KERNEL_CS, 4(%esp) | 1126 | cmpw $__KERNEL_CS, 4(%esp) |
1128 | jne \ok | 1127 | jne \ok |
1129 | \label: | 1128 | \label: |
1130 | movl TSS_sysenter_sp0 + \offset(%esp), %esp | 1129 | movl TSS_sysenter_sp0 + \offset(%esp), %esp |
1131 | pushfl | 1130 | pushfl |
1132 | pushl $__KERNEL_CS | 1131 | pushl $__KERNEL_CS |
1133 | pushl $sysenter_past_esp | 1132 | pushl $sysenter_past_esp |
1134 | .endm | 1133 | .endm |
1135 | 1134 | ||
1136 | ENTRY(debug) | 1135 | ENTRY(debug) |
1137 | ASM_CLAC | 1136 | ASM_CLAC |
1138 | cmpl $entry_SYSENTER_32,(%esp) | 1137 | cmpl $entry_SYSENTER_32, (%esp) |
1139 | jne debug_stack_correct | 1138 | jne debug_stack_correct |
1140 | FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn | 1139 | FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn |
1141 | debug_stack_correct: | 1140 | debug_stack_correct: |
1142 | pushl $-1 # mark this as an int | 1141 | pushl $-1 # mark this as an int |
1143 | SAVE_ALL | 1142 | SAVE_ALL |
1144 | TRACE_IRQS_OFF | 1143 | TRACE_IRQS_OFF |
1145 | xorl %edx,%edx # error code 0 | 1144 | xorl %edx, %edx # error code 0 |
1146 | movl %esp,%eax # pt_regs pointer | 1145 | movl %esp, %eax # pt_regs pointer |
1147 | call do_debug | 1146 | call do_debug |
1148 | jmp ret_from_exception | 1147 | jmp ret_from_exception |
1149 | END(debug) | 1148 | END(debug) |
1150 | 1149 | ||
1151 | /* | 1150 | /* |
@@ -1159,91 +1158,91 @@ END(debug) | |||
1159 | ENTRY(nmi) | 1158 | ENTRY(nmi) |
1160 | ASM_CLAC | 1159 | ASM_CLAC |
1161 | #ifdef CONFIG_X86_ESPFIX32 | 1160 | #ifdef CONFIG_X86_ESPFIX32 |
1162 | pushl %eax | 1161 | pushl %eax |
1163 | movl %ss, %eax | 1162 | movl %ss, %eax |
1164 | cmpw $__ESPFIX_SS, %ax | 1163 | cmpw $__ESPFIX_SS, %ax |
1165 | popl %eax | 1164 | popl %eax |
1166 | je nmi_espfix_stack | 1165 | je nmi_espfix_stack |
1167 | #endif | 1166 | #endif |
1168 | cmpl $entry_SYSENTER_32,(%esp) | 1167 | cmpl $entry_SYSENTER_32, (%esp) |
1169 | je nmi_stack_fixup | 1168 | je nmi_stack_fixup |
1170 | pushl %eax | 1169 | pushl %eax |
1171 | movl %esp,%eax | 1170 | movl %esp, %eax |
1172 | /* Do not access memory above the end of our stack page, | 1171 | /* |
1172 | * Do not access memory above the end of our stack page, | ||
1173 | * it might not exist. | 1173 | * it might not exist. |
1174 | */ | 1174 | */ |
1175 | andl $(THREAD_SIZE-1),%eax | 1175 | andl $(THREAD_SIZE-1), %eax |
1176 | cmpl $(THREAD_SIZE-20),%eax | 1176 | cmpl $(THREAD_SIZE-20), %eax |
1177 | popl %eax | 1177 | popl %eax |
1178 | jae nmi_stack_correct | 1178 | jae nmi_stack_correct |
1179 | cmpl $entry_SYSENTER_32,12(%esp) | 1179 | cmpl $entry_SYSENTER_32, 12(%esp) |
1180 | je nmi_debug_stack_check | 1180 | je nmi_debug_stack_check |
1181 | nmi_stack_correct: | 1181 | nmi_stack_correct: |
1182 | pushl %eax | 1182 | pushl %eax |
1183 | SAVE_ALL | 1183 | SAVE_ALL |
1184 | xorl %edx,%edx # zero error code | 1184 | xorl %edx, %edx # zero error code |
1185 | movl %esp,%eax # pt_regs pointer | 1185 | movl %esp, %eax # pt_regs pointer |
1186 | call do_nmi | 1186 | call do_nmi |
1187 | jmp restore_all_notrace | 1187 | jmp restore_all_notrace |
1188 | 1188 | ||
1189 | nmi_stack_fixup: | 1189 | nmi_stack_fixup: |
1190 | FIX_STACK 12, nmi_stack_correct, 1 | 1190 | FIX_STACK 12, nmi_stack_correct, 1 |
1191 | jmp nmi_stack_correct | 1191 | jmp nmi_stack_correct |
1192 | 1192 | ||
1193 | nmi_debug_stack_check: | 1193 | nmi_debug_stack_check: |
1194 | cmpw $__KERNEL_CS,16(%esp) | 1194 | cmpw $__KERNEL_CS, 16(%esp) |
1195 | jne nmi_stack_correct | 1195 | jne nmi_stack_correct |
1196 | cmpl $debug,(%esp) | 1196 | cmpl $debug, (%esp) |
1197 | jb nmi_stack_correct | 1197 | jb nmi_stack_correct |
1198 | cmpl $debug_esp_fix_insn,(%esp) | 1198 | cmpl $debug_esp_fix_insn, (%esp) |
1199 | ja nmi_stack_correct | 1199 | ja nmi_stack_correct |
1200 | FIX_STACK 24, nmi_stack_correct, 1 | 1200 | FIX_STACK 24, nmi_stack_correct, 1 |
1201 | jmp nmi_stack_correct | 1201 | jmp nmi_stack_correct |
1202 | 1202 | ||
1203 | #ifdef CONFIG_X86_ESPFIX32 | 1203 | #ifdef CONFIG_X86_ESPFIX32 |
1204 | nmi_espfix_stack: | 1204 | nmi_espfix_stack: |
1205 | /* | 1205 | /* |
1206 | * create the pointer to lss back | 1206 | * create the pointer to lss back |
1207 | */ | 1207 | */ |
1208 | pushl %ss | 1208 | pushl %ss |
1209 | pushl %esp | 1209 | pushl %esp |
1210 | addl $4, (%esp) | 1210 | addl $4, (%esp) |
1211 | /* copy the iret frame of 12 bytes */ | 1211 | /* copy the iret frame of 12 bytes */ |
1212 | .rept 3 | 1212 | .rept 3 |
1213 | pushl 16(%esp) | 1213 | pushl 16(%esp) |
1214 | .endr | 1214 | .endr |
1215 | pushl %eax | 1215 | pushl %eax |
1216 | SAVE_ALL | 1216 | SAVE_ALL |
1217 | FIXUP_ESPFIX_STACK # %eax == %esp | 1217 | FIXUP_ESPFIX_STACK # %eax == %esp |
1218 | xorl %edx,%edx # zero error code | 1218 | xorl %edx, %edx # zero error code |
1219 | call do_nmi | 1219 | call do_nmi |
1220 | RESTORE_REGS | 1220 | RESTORE_REGS |
1221 | lss 12+4(%esp), %esp # back to espfix stack | 1221 | lss 12+4(%esp), %esp # back to espfix stack |
1222 | jmp irq_return | 1222 | jmp irq_return |
1223 | #endif | 1223 | #endif |
1224 | END(nmi) | 1224 | END(nmi) |
1225 | 1225 | ||
1226 | ENTRY(int3) | 1226 | ENTRY(int3) |
1227 | ASM_CLAC | 1227 | ASM_CLAC |
1228 | pushl $-1 # mark this as an int | 1228 | pushl $-1 # mark this as an int |
1229 | SAVE_ALL | 1229 | SAVE_ALL |
1230 | TRACE_IRQS_OFF | 1230 | TRACE_IRQS_OFF |
1231 | xorl %edx,%edx # zero error code | 1231 | xorl %edx, %edx # zero error code |
1232 | movl %esp,%eax # pt_regs pointer | 1232 | movl %esp, %eax # pt_regs pointer |
1233 | call do_int3 | 1233 | call do_int3 |
1234 | jmp ret_from_exception | 1234 | jmp ret_from_exception |
1235 | END(int3) | 1235 | END(int3) |
1236 | 1236 | ||
1237 | ENTRY(general_protection) | 1237 | ENTRY(general_protection) |
1238 | pushl $do_general_protection | 1238 | pushl $do_general_protection |
1239 | jmp error_code | 1239 | jmp error_code |
1240 | END(general_protection) | 1240 | END(general_protection) |
1241 | 1241 | ||
1242 | #ifdef CONFIG_KVM_GUEST | 1242 | #ifdef CONFIG_KVM_GUEST |
1243 | ENTRY(async_page_fault) | 1243 | ENTRY(async_page_fault) |
1244 | ASM_CLAC | 1244 | ASM_CLAC |
1245 | pushl $do_async_page_fault | 1245 | pushl $do_async_page_fault |
1246 | jmp error_code | 1246 | jmp error_code |
1247 | END(async_page_fault) | 1247 | END(async_page_fault) |
1248 | #endif | 1248 | #endif |
1249 | |||