Diffstat (limited to 'arch/x86/kernel/entry_64.S')
-rw-r--r--  arch/x86/kernel/entry_64.S | 1417
1 files changed, 732 insertions, 685 deletions
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 54e0bbdccb99..e28c7a987793 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -11,15 +11,15 @@
  *
  * NOTE: This code handles signal-recognition, which happens every time
  * after an interrupt and after each system call.
  *
  * Normal syscalls and interrupts don't save a full stack frame, this is
  * only done for syscall tracing, signals or fork/exec et.al.
  *
  * A note on terminology:
  * - top of stack: Architecture defined interrupt frame from SS to RIP
  * at the top of the kernel process stack.
  * - partial stack frame: partially saved registers upto R11.
  * - full stack frame: Like partial stack frame, but all register saved.
  *
  * Some macro usage:
  * - CFI macros are used to generate dwarf2 unwind information for better
@@ -60,7 +60,6 @@
 #define __AUDIT_ARCH_LE	0x40000000
 
 	.code64
-
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(mcount)
@@ -71,15 +70,7 @@ ENTRY(ftrace_caller)
 	cmpl $0, function_trace_stop
 	jne ftrace_stub
 
-	/* taken from glibc */
-	subq $0x38, %rsp
-	movq %rax, (%rsp)
-	movq %rcx, 8(%rsp)
-	movq %rdx, 16(%rsp)
-	movq %rsi, 24(%rsp)
-	movq %rdi, 32(%rsp)
-	movq %r8, 40(%rsp)
-	movq %r9, 48(%rsp)
+	MCOUNT_SAVE_FRAME
 
 	movq 0x38(%rsp), %rdi
 	movq 8(%rbp), %rsi
@@ -89,14 +80,7 @@ ENTRY(ftrace_caller)
 ftrace_call:
 	call ftrace_stub
 
-	movq 48(%rsp), %r9
-	movq 40(%rsp), %r8
-	movq 32(%rsp), %rdi
-	movq 24(%rsp), %rsi
-	movq 16(%rsp), %rdx
-	movq 8(%rsp), %rcx
-	movq (%rsp), %rax
-	addq $0x38, %rsp
+	MCOUNT_RESTORE_FRAME
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 .globl ftrace_graph_call
@@ -130,15 +114,7 @@ ftrace_stub:
 	retq
 
 trace:
-	/* taken from glibc */
-	subq $0x38, %rsp
-	movq %rax, (%rsp)
-	movq %rcx, 8(%rsp)
-	movq %rdx, 16(%rsp)
-	movq %rsi, 24(%rsp)
-	movq %rdi, 32(%rsp)
-	movq %r8, 40(%rsp)
-	movq %r9, 48(%rsp)
+	MCOUNT_SAVE_FRAME
 
 	movq 0x38(%rsp), %rdi
 	movq 8(%rbp), %rsi
@@ -146,14 +122,7 @@ trace:
 
 	call *ftrace_trace_function
 
-	movq 48(%rsp), %r9
-	movq 40(%rsp), %r8
-	movq 32(%rsp), %rdi
-	movq 24(%rsp), %rsi
-	movq 16(%rsp), %rdx
-	movq 8(%rsp), %rcx
-	movq (%rsp), %rax
-	addq $0x38, %rsp
+	MCOUNT_RESTORE_FRAME
 
 	jmp ftrace_stub
 END(mcount)
@@ -165,14 +134,7 @@ ENTRY(ftrace_graph_caller)
 	cmpl $0, function_trace_stop
 	jne ftrace_stub
 
-	subq $0x38, %rsp
-	movq %rax, (%rsp)
-	movq %rcx, 8(%rsp)
-	movq %rdx, 16(%rsp)
-	movq %rsi, 24(%rsp)
-	movq %rdi, 32(%rsp)
-	movq %r8, 40(%rsp)
-	movq %r9, 48(%rsp)
+	MCOUNT_SAVE_FRAME
 
 	leaq 8(%rbp), %rdi
 	movq 0x38(%rsp), %rsi
@@ -180,14 +142,8 @@ ENTRY(ftrace_graph_caller)
 
 	call prepare_ftrace_return
 
-	movq 48(%rsp), %r9
-	movq 40(%rsp), %r8
-	movq 32(%rsp), %rdi
-	movq 24(%rsp), %rsi
-	movq 16(%rsp), %rdx
-	movq 8(%rsp), %rcx
-	movq (%rsp), %rax
-	addq $0x38, %rsp
+	MCOUNT_RESTORE_FRAME
+
 	retq
 END(ftrace_graph_caller)
 
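[Review note: for reference, the MCOUNT_SAVE_FRAME/MCOUNT_RESTORE_FRAME pair that replaces the four open-coded sequences above is assumed to be essentially the removed code verbatim, i.e. roughly (a sketch, not necessarily the exact asm/ftrace.h definitions):

	.macro MCOUNT_SAVE_FRAME
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)
	.endm

	.macro MCOUNT_RESTORE_FRAME
	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp
	.endm

These save exactly the caller-clobbered argument registers that the traced function's mcount call site may still need.]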
@@ -225,7 +181,7 @@ return_to_handler:
 
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
 #endif
 
 #ifdef CONFIG_PARAVIRT
 ENTRY(native_usergs_sysret64)
@@ -244,29 +200,29 @@ ENTRY(native_usergs_sysret64)
 .endm
 
 /*
  * C code is not supposed to know about undefined top of stack. Every time
  * a C function with an pt_regs argument is called from the SYSCALL based
  * fast path FIXUP_TOP_OF_STACK is needed.
  * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
  * manipulation.
  */
 
 	/* %rsp:at FRAMEEND */
-	.macro FIXUP_TOP_OF_STACK tmp
+	.macro FIXUP_TOP_OF_STACK tmp offset=0
 	movq %gs:pda_oldrsp,\tmp
-	movq \tmp,RSP(%rsp)
-	movq $__USER_DS,SS(%rsp)
-	movq $__USER_CS,CS(%rsp)
-	movq $-1,RCX(%rsp)
-	movq R11(%rsp),\tmp  /* get eflags */
-	movq \tmp,EFLAGS(%rsp)
+	movq \tmp,RSP+\offset(%rsp)
+	movq $__USER_DS,SS+\offset(%rsp)
+	movq $__USER_CS,CS+\offset(%rsp)
+	movq $-1,RCX+\offset(%rsp)
+	movq R11+\offset(%rsp),\tmp /* get eflags */
+	movq \tmp,EFLAGS+\offset(%rsp)
 	.endm
 
-	.macro RESTORE_TOP_OF_STACK tmp,offset=0
-	movq RSP-\offset(%rsp),\tmp
+	.macro RESTORE_TOP_OF_STACK tmp offset=0
+	movq RSP+\offset(%rsp),\tmp
 	movq \tmp,%gs:pda_oldrsp
-	movq EFLAGS-\offset(%rsp),\tmp
-	movq \tmp,R11-\offset(%rsp)
+	movq EFLAGS+\offset(%rsp),\tmp
+	movq \tmp,R11+\offset(%rsp)
 	.endm
 
 	.macro FAKE_STACK_FRAME child_rip
@@ -278,7 +234,7 @@ ENTRY(native_usergs_sysret64)
 	pushq %rax /* rsp */
 	CFI_ADJUST_CFA_OFFSET	8
 	CFI_REL_OFFSET	rsp,0
-	pushq $(1<<9)	/* eflags - interrupts on */
+	pushq $X86_EFLAGS_IF	/* eflags - interrupts on */
 	CFI_ADJUST_CFA_OFFSET	8
 	/*CFI_REL_OFFSET	rflags,0*/
 	pushq $__KERNEL_CS /* cs */
@@ -296,62 +252,184 @@ ENTRY(native_usergs_sysret64)
 	CFI_ADJUST_CFA_OFFSET	-(6*8)
 	.endm
 
-	.macro CFI_DEFAULT_STACK start=1
+/*
+ * initial frame state for interrupts (and exceptions without error code)
+ */
+	.macro EMPTY_FRAME start=1 offset=0
 	.if \start
 	CFI_STARTPROC simple
 	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA rsp,SS+8
+	CFI_DEF_CFA rsp,8+\offset
 	.else
-	CFI_DEF_CFA_OFFSET SS+8
+	CFI_DEF_CFA_OFFSET 8+\offset
 	.endif
-	CFI_REL_OFFSET r15,R15
-	CFI_REL_OFFSET r14,R14
-	CFI_REL_OFFSET r13,R13
-	CFI_REL_OFFSET r12,R12
-	CFI_REL_OFFSET rbp,RBP
-	CFI_REL_OFFSET rbx,RBX
-	CFI_REL_OFFSET r11,R11
-	CFI_REL_OFFSET r10,R10
-	CFI_REL_OFFSET r9,R9
-	CFI_REL_OFFSET r8,R8
-	CFI_REL_OFFSET rax,RAX
-	CFI_REL_OFFSET rcx,RCX
-	CFI_REL_OFFSET rdx,RDX
-	CFI_REL_OFFSET rsi,RSI
-	CFI_REL_OFFSET rdi,RDI
-	CFI_REL_OFFSET rip,RIP
-	/*CFI_REL_OFFSET cs,CS*/
-	/*CFI_REL_OFFSET rflags,EFLAGS*/
-	CFI_REL_OFFSET rsp,RSP
-	/*CFI_REL_OFFSET ss,SS*/
 	.endm
+
 /*
- * A newly forked process directly context switches into this.
+ * initial frame state for interrupts (and exceptions without error code)
  */
-/* rdi:	prev */
+	.macro INTR_FRAME start=1 offset=0
+	EMPTY_FRAME \start, SS+8+\offset-RIP
+	/*CFI_REL_OFFSET ss, SS+\offset-RIP*/
+	CFI_REL_OFFSET rsp, RSP+\offset-RIP
+	/*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
+	/*CFI_REL_OFFSET cs, CS+\offset-RIP*/
+	CFI_REL_OFFSET rip, RIP+\offset-RIP
+	.endm
+
+/*
+ * initial frame state for exceptions with error code (and interrupts
+ * with vector already pushed)
+ */
+	.macro XCPT_FRAME start=1 offset=0
+	INTR_FRAME \start, RIP+\offset-ORIG_RAX
+	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
+	.endm
+
+/*
+ * frame that enables calling into C.
+ */
+	.macro PARTIAL_FRAME start=1 offset=0
+	XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
+	CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
+	CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
+	CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
+	CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
+	CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
+	CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
+	CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
+	CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
+	CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
+	.endm
+
+/*
+ * frame that enables passing a complete pt_regs to a C function.
+ */
+	.macro DEFAULT_FRAME start=1 offset=0
+	PARTIAL_FRAME \start, R11+\offset-R15
+	CFI_REL_OFFSET rbx, RBX+\offset
+	CFI_REL_OFFSET rbp, RBP+\offset
+	CFI_REL_OFFSET r12, R12+\offset
+	CFI_REL_OFFSET r13, R13+\offset
+	CFI_REL_OFFSET r14, R14+\offset
+	CFI_REL_OFFSET r15, R15+\offset
+	.endm
+
+/* save partial stack frame */
+ENTRY(save_args)
+	XCPT_FRAME
+	cld
+	movq_cfi rdi, RDI+16-ARGOFFSET
+	movq_cfi rsi, RSI+16-ARGOFFSET
+	movq_cfi rdx, RDX+16-ARGOFFSET
+	movq_cfi rcx, RCX+16-ARGOFFSET
+	movq_cfi rax, RAX+16-ARGOFFSET
+	movq_cfi r8, R8+16-ARGOFFSET
+	movq_cfi r9, R9+16-ARGOFFSET
+	movq_cfi r10, R10+16-ARGOFFSET
+	movq_cfi r11, R11+16-ARGOFFSET
+
+	leaq -ARGOFFSET+16(%rsp),%rdi	/* arg1 for handler */
+	movq_cfi rbp, 8			/* push %rbp */
+	leaq 8(%rsp), %rbp		/* mov %rsp, %ebp */
+	testl $3, CS(%rdi)
+	je 1f
+	SWAPGS
+	/*
+	 * irqcount is used to check if a CPU is already on an interrupt stack
+	 * or not. While this is essentially redundant with preempt_count it is
+	 * a little cheaper to use a separate counter in the PDA (short of
+	 * moving irq_enter into assembly, which would be too much work)
+	 */
+1:	incl %gs:pda_irqcount
+	jne 2f
+	popq_cfi %rax			/* move return address... */
+	mov %gs:pda_irqstackptr,%rsp
+	EMPTY_FRAME 0
+	pushq_cfi %rax			/* ... to the new stack */
+	/*
+	 * We entered an interrupt context - irqs are off:
+	 */
+2:	TRACE_IRQS_OFF
+	ret
+	CFI_ENDPROC
+END(save_args)
+
+ENTRY(save_rest)
+	PARTIAL_FRAME 1 REST_SKIP+8
+	movq 5*8+16(%rsp), %r11		/* save return address */
+	movq_cfi rbx, RBX+16
+	movq_cfi rbp, RBP+16
+	movq_cfi r12, R12+16
+	movq_cfi r13, R13+16
+	movq_cfi r14, R14+16
+	movq_cfi r15, R15+16
+	movq %r11, 8(%rsp)		/* return address */
+	FIXUP_TOP_OF_STACK %r11, 16
+	ret
+	CFI_ENDPROC
+END(save_rest)
+
+/* save complete stack frame */
+ENTRY(save_paranoid)
+	XCPT_FRAME 1 RDI+8
+	cld
+	movq_cfi rdi, RDI+8
+	movq_cfi rsi, RSI+8
+	movq_cfi rdx, RDX+8
+	movq_cfi rcx, RCX+8
+	movq_cfi rax, RAX+8
+	movq_cfi r8, R8+8
+	movq_cfi r9, R9+8
+	movq_cfi r10, R10+8
+	movq_cfi r11, R11+8
+	movq_cfi rbx, RBX+8
+	movq_cfi rbp, RBP+8
+	movq_cfi r12, R12+8
+	movq_cfi r13, R13+8
+	movq_cfi r14, R14+8
+	movq_cfi r15, R15+8
+	movl $1,%ebx
+	movl $MSR_GS_BASE,%ecx
+	rdmsr
+	testl %edx,%edx
+	js 1f	/* negative -> in kernel */
+	SWAPGS
+	xorl %ebx,%ebx
+1:	ret
+	CFI_ENDPROC
+END(save_paranoid)
+
+/*
+ * A newly forked process directly context switches into this address.
+ *
+ * rdi: prev task we switched from
+ */
 ENTRY(ret_from_fork)
-	CFI_DEFAULT_STACK
+	DEFAULT_FRAME
+
 	push kernel_eflags(%rip)
 	CFI_ADJUST_CFA_OFFSET 8
 	popf				# reset kernel eflags
 	CFI_ADJUST_CFA_OFFSET -8
-	call schedule_tail
+
+	call schedule_tail		# rdi: 'prev' task parameter
+
 	GET_THREAD_INFO(%rcx)
-	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
-	jnz rff_trace
-rff_action:
+
+	CFI_REMEMBER_STATE
 	RESTORE_REST
-	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
+
+	testl $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
 	je int_ret_from_sys_call
-	testl $_TIF_IA32,TI_flags(%rcx)
+
+	testl $_TIF_IA32, TI_flags(%rcx)	# 32-bit compat task needs IRET
 	jnz int_ret_from_sys_call
-	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
-	jmp ret_from_sys_call
-rff_trace:
-	movq %rsp,%rdi
-	call syscall_trace_leave
-	GET_THREAD_INFO(%rcx)
-	jmp rff_action
+
+	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
+	jmp ret_from_sys_call			# go to the SYSRET fastpath
+
+	CFI_RESTORE_STATE
 	CFI_ENDPROC
 END(ret_from_fork)
 
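[Review note: the new unwind annotations form a strict hierarchy, each macro delegating to the next simpler one and compensating the CFA for the additional words already on the stack:

	/* sketch of the nesting, innermost last:
	 * DEFAULT_FRAME (full pt_regs)
	 *   -> PARTIAL_FRAME (pt_regs up to R11, i.e. SAVE_ARGS state)
	 *     -> XCPT_FRAME (hardware frame + error code / vector)
	 *       -> INTR_FRAME (hardware frame only)
	 *         -> EMPTY_FRAME (bare CFA definition)
	 */

save_args, save_rest and save_paranoid then become ordinary callable helpers that fill in the matching portion of pt_regs, replacing both the old CFI_DEFAULT_STACK blanket annotation and the open-coded register saving duplicated across entry points.]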
@@ -361,20 +439,20 @@ END(ret_from_fork)
  * SYSCALL does not save anything on the stack and does not change the
  * stack pointer.
  */
 
 /*
  * Register setup:
  * rax  system call number
  * rdi  arg0
  * rcx  return address for syscall/sysret, C arg3
  * rsi  arg1
  * rdx  arg2
  * r10  arg3	(--> moved to rcx for C)
  * r8   arg4
  * r9   arg5
  * r11  eflags for syscall/sysret, temporary for C
  * r12-r15,rbp,rbx saved by C code, not touched.
  *
  * Interrupts are off on entry.
  * Only called from user space.
  *
@@ -384,7 +462,7 @@ END(ret_from_fork)
  * When user can change the frames always force IRET. That is because
  * it deals with uncanonical addresses better. SYSRET has trouble
  * with them due to bugs in both AMD and Intel CPUs.
  */
 
 ENTRY(system_call)
 	CFI_STARTPROC	simple
@@ -400,7 +478,7 @@ ENTRY(system_call)
  */
 ENTRY(system_call_after_swapgs)
 
 	movq	%rsp,%gs:pda_oldrsp
 	movq	%gs:pda_kernelstack,%rsp
 	/*
 	 * No need to follow this irqs off/on section - it's straight
@@ -408,7 +486,7 @@ ENTRY(system_call_after_swapgs)
 	 */
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	SAVE_ARGS 8,1
 	movq  %rax,ORIG_RAX-ARGOFFSET(%rsp)
 	movq  %rcx,RIP-ARGOFFSET(%rsp)
 	CFI_REL_OFFSET rip,RIP-ARGOFFSET
 	GET_THREAD_INFO(%rcx)
@@ -422,19 +500,19 @@ system_call_fastpath:
 	movq %rax,RAX-ARGOFFSET(%rsp)
 /*
  * Syscall return path ending with SYSRET (fast path)
  * Has incomplete stack frame and undefined top of stack.
  */
 ret_from_sys_call:
 	movl $_TIF_ALLWORK_MASK,%edi
 	/* edi:	flagmask */
 sysret_check:
 	LOCKDEP_SYS_EXIT
 	GET_THREAD_INFO(%rcx)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	movl TI_flags(%rcx),%edx
 	andl %edi,%edx
 	jnz  sysret_careful
 	CFI_REMEMBER_STATE
 	/*
 	 * sysretq will re-enable interrupts:
@@ -449,7 +527,7 @@ sysret_check:
 
 	CFI_RESTORE_STATE
 	/* Handle reschedules */
 	/* edx:	work, edi: workmask */
 sysret_careful:
 	bt $TIF_NEED_RESCHED,%edx
 	jnc sysret_signal
@@ -462,7 +540,7 @@ sysret_careful:
 	CFI_ADJUST_CFA_OFFSET -8
 	jmp sysret_check
 
 	/* Handle a signal */
 sysret_signal:
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
@@ -471,17 +549,20 @@ sysret_signal:
 	jc sysret_audit
 #endif
 	/* edx:	work flags (arg3) */
-	leaq do_notify_resume(%rip),%rax
 	leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
 	xorl %esi,%esi # oldset -> arg2
-	call ptregscall_common
+	SAVE_REST
+	FIXUP_TOP_OF_STACK %r11
+	call do_notify_resume
+	RESTORE_TOP_OF_STACK %r11
+	RESTORE_REST
 	movl $_TIF_WORK_MASK,%edi
 	/* Use IRET because user could have changed frame. This
 	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	jmp int_with_check
 
 badsys:
 	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
 	jmp ret_from_sys_call
@@ -520,7 +601,7 @@ sysret_audit:
 #endif	/* CONFIG_AUDITSYSCALL */
 
 	/* Do syscall tracing */
 tracesys:
 #ifdef CONFIG_AUDITSYSCALL
 	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
 	jz auditsys
@@ -543,8 +624,8 @@ tracesys:
 	call *sys_call_table(,%rax,8)
 	movq %rax,RAX-ARGOFFSET(%rsp)
 	/* Use IRET because user could have changed frame */
 
 /*
  * Syscall return path ending with IRET.
  * Has correct top of stack, but partial stack frame.
  */
@@ -588,18 +669,18 @@ int_very_careful:
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	SAVE_REST
 	/* Check for syscall exit trace */
 	testl $_TIF_WORK_SYSCALL_EXIT,%edx
 	jz int_signal
 	pushq %rdi
 	CFI_ADJUST_CFA_OFFSET 8
 	leaq 8(%rsp),%rdi	# &ptregs -> arg1
 	call syscall_trace_leave
 	popq %rdi
 	CFI_ADJUST_CFA_OFFSET -8
 	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
 	jmp int_restore_rest
 
 int_signal:
 	testl $_TIF_DO_NOTIFY_MASK,%edx
 	jz 1f
@@ -614,22 +695,24 @@ int_restore_rest:
 	jmp int_with_check
 	CFI_ENDPROC
 END(system_call)
 
 /*
  * Certain special system calls that need to save a complete full stack frame.
  */
-
 	.macro PTREGSCALL label,func,arg
-	.globl \label
-\label:
-	leaq	\func(%rip),%rax
-	leaq    -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
-	jmp	ptregscall_common
+ENTRY(\label)
+	PARTIAL_FRAME 1 8		/* offset 8: return address */
+	subq $REST_SKIP, %rsp
+	CFI_ADJUST_CFA_OFFSET REST_SKIP
+	call save_rest
+	DEFAULT_FRAME 0 8		/* offset 8: return address */
+	leaq 8(%rsp), \arg	/* pt_regs pointer */
+	call \func
+	jmp ptregscall_common
+	CFI_ENDPROC
 END(\label)
 	.endm
 
-	CFI_STARTPROC
-
 	PTREGSCALL stub_clone, sys_clone, %r8
 	PTREGSCALL stub_fork, sys_fork, %rdi
 	PTREGSCALL stub_vfork, sys_vfork, %rdi
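[Review note: substituting the arguments into the new macro, PTREGSCALL stub_clone, sys_clone, %r8 is assumed to expand to (sketch):

ENTRY(stub_clone)
	PARTIAL_FRAME 1 8		/* offset 8: return address */
	subq $REST_SKIP, %rsp
	CFI_ADJUST_CFA_OFFSET REST_SKIP
	call save_rest
	DEFAULT_FRAME 0 8		/* offset 8: return address */
	leaq 8(%rsp), %r8		/* pt_regs pointer */
	call sys_clone
	jmp ptregscall_common
	CFI_ENDPROC
END(stub_clone)

i.e. the function pointer no longer travels through %rax; each stub calls its sys_* function directly and only shares the register-restore tail in ptregscall_common.]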
@@ -637,25 +720,18 @@ END(\label)
 	PTREGSCALL stub_iopl, sys_iopl, %rsi
 
 ENTRY(ptregscall_common)
-	popq %r11
-	CFI_ADJUST_CFA_OFFSET -8
-	CFI_REGISTER rip, r11
-	SAVE_REST
-	movq %r11, %r15
-	CFI_REGISTER rip, r15
-	FIXUP_TOP_OF_STACK %r11
-	call *%rax
-	RESTORE_TOP_OF_STACK %r11
-	movq %r15, %r11
-	CFI_REGISTER rip, r11
-	RESTORE_REST
-	pushq %r11
-	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET rip, 0
-	ret
+	DEFAULT_FRAME 1 8	/* offset 8: return address */
+	RESTORE_TOP_OF_STACK %r11, 8
+	movq_cfi_restore R15+8, r15
+	movq_cfi_restore R14+8, r14
+	movq_cfi_restore R13+8, r13
+	movq_cfi_restore R12+8, r12
+	movq_cfi_restore RBP+8, rbp
+	movq_cfi_restore RBX+8, rbx
+	ret $REST_SKIP		/* pop extended registers */
 	CFI_ENDPROC
 END(ptregscall_common)
 
 ENTRY(stub_execve)
 	CFI_STARTPROC
 	popq %r11
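[Review note on the ret $REST_SKIP trick: ret imm16 pops the return address and then adds imm16 to %rsp, so the single instruction both returns and releases the REST_SKIP-byte area the PTREGSCALL stub reserved with subq $REST_SKIP, %rsp. Sketch of the stack at that point:

	/* 0(%rsp): return address back into the syscall path
	 * 8(%rsp): REST_SKIP bytes that held rbx/rbp/r12-r15 */
	ret $REST_SKIP		/* pop rip, then %rsp += REST_SKIP */
]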
@@ -671,11 +747,11 @@ ENTRY(stub_execve)
 	jmp int_ret_from_sys_call
 	CFI_ENDPROC
 END(stub_execve)
 
 /*
  * sigreturn is special because it needs to restore all registers on return.
  * This cannot be done with SYSRET, so use the IRET return path instead.
  */
 ENTRY(stub_rt_sigreturn)
 	CFI_STARTPROC
 	addq $8, %rsp
@@ -691,70 +767,70 @@ ENTRY(stub_rt_sigreturn)
 END(stub_rt_sigreturn)
 
 /*
- * initial frame state for interrupts and exceptions
+ * Build the entry stubs and pointer table with some assembler magic.
+ * We pack 7 stubs into a single 32-byte chunk, which will fit in a
+ * single cache line on all modern x86 implementations.
  */
-	.macro _frame ref
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA rsp,SS+8-\ref
-	/*CFI_REL_OFFSET ss,SS-\ref*/
-	CFI_REL_OFFSET rsp,RSP-\ref
-	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
-	/*CFI_REL_OFFSET cs,CS-\ref*/
-	CFI_REL_OFFSET rip,RIP-\ref
-	.endm
+	.section .init.rodata,"a"
+ENTRY(interrupt)
+	.text
+	.p2align 5
+	.p2align CONFIG_X86_L1_CACHE_SHIFT
+ENTRY(irq_entries_start)
+	INTR_FRAME
+vector=FIRST_EXTERNAL_VECTOR
+.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
+	.balign 32
+  .rept	7
+    .if vector < NR_VECTORS
+      .if vector <> FIRST_EXTERNAL_VECTOR
+	CFI_ADJUST_CFA_OFFSET -8
+      .endif
+1:	pushq $(~vector+0x80)	/* Note: always in signed byte range */
+	CFI_ADJUST_CFA_OFFSET 8
+      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
+	jmp 2f
+      .endif
+      .previous
+	.quad 1b
+      .text
+vector=vector+1
+    .endif
+  .endr
+2:	jmp common_interrupt
+.endr
+	CFI_ENDPROC
+END(irq_entries_start)
 
-/* initial frame state for interrupts (and exceptions without error code) */
-#define INTR_FRAME _frame RIP
-/* initial frame state for exceptions with error code (and interrupts with
-   vector already pushed) */
-#define XCPT_FRAME _frame ORIG_RAX
+.previous
+END(interrupt)
+.previous
 
 /*
  * Interrupt entry/exit.
  *
  * Interrupt entry points save only callee clobbered registers in fast path.
  *
  * Entry runs with interrupts off.
  */
 
-/* 0(%rsp): interrupt number */
+/* 0(%rsp): ~(interrupt number) */
 	.macro interrupt func
-	cld
-	SAVE_ARGS
-	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
-	pushq %rbp
-	/*
-	 * Save rbp twice: One is for marking the stack frame, as usual, and the
-	 * other, to fill pt_regs properly. This is because bx comes right
-	 * before the last saved register in that structure, and not bp. If the
-	 * base pointer were in the place bx is today, this would not be needed.
-	 */
-	movq %rbp, -8(%rsp)
-	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET rbp, 0
-	movq %rsp,%rbp
-	CFI_DEF_CFA_REGISTER rbp
-	testl $3,CS(%rdi)
-	je 1f
-	SWAPGS
-	/* irqcount is used to check if a CPU is already on an interrupt
-	   stack or not. While this is essentially redundant with preempt_count
-	   it is a little cheaper to use a separate counter in the PDA
-	   (short of moving irq_enter into assembly, which would be too
-	   much work) */
-1:	incl %gs:pda_irqcount
-	cmoveq %gs:pda_irqstackptr,%rsp
-	push %rbp			# backlink for old unwinder
-	/*
-	 * We entered an interrupt context - irqs are off:
-	 */
-	TRACE_IRQS_OFF
+	subq $10*8, %rsp
+	CFI_ADJUST_CFA_OFFSET 10*8
+	call save_args
+	PARTIAL_FRAME 0
 	call \func
 	.endm
 
-ENTRY(common_interrupt)
+	/*
+	 * The interrupt stubs push (~vector+0x80) onto the stack and
+	 * then jump to common_interrupt.
+	 */
+	.p2align CONFIG_X86_L1_CACHE_SHIFT
+common_interrupt:
 	XCPT_FRAME
+	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
 	/* 0(%rsp): oldrsp-ARGOFFSET */
 ret_from_intr:
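[Review note: the packing arithmetic behind "7 stubs per 32-byte chunk", assuming imm8 pushes and rel8 jumps, which is what the "signed byte range" comment guarantees:

	/* 6 x (pushq $imm8 ; jmp 2f) = 6 * (2 + 2) = 24 bytes
	 * 1 x  pushq $imm8           =              2 bytes
	 * 2:   jmp common_interrupt  =              5 bytes (rel32 worst case)
	 *                       total:             31 bytes <= 32
	 */

The pushed value $(~vector+0x80) equals 127-vector, which stays within a signed byte for vectors 0x20..0xff; common_interrupt's addq $-0x80,(%rsp) then turns it into ~vector, i.e. the [-256,-1] range the comment mentions.]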
@@ -768,12 +844,12 @@ exit_intr:
 	GET_THREAD_INFO(%rcx)
 	testl $3,CS-ARGOFFSET(%rsp)
 	je retint_kernel
 
 	/* Interrupt came from user space */
 	/*
 	 * Has a correct top of stack, but a partial stack frame
 	 * %rcx: thread info. Interrupts off.
 	 */
 retint_with_reschedule:
 	movl $_TIF_WORK_MASK,%edi
 retint_check:
@@ -846,20 +922,20 @@ retint_careful:
 	pushq %rdi
 	CFI_ADJUST_CFA_OFFSET	8
 	call  schedule
 	popq %rdi
 	CFI_ADJUST_CFA_OFFSET	-8
 	GET_THREAD_INFO(%rcx)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	jmp retint_check
 
 retint_signal:
 	testl $_TIF_DO_NOTIFY_MASK,%edx
 	jz    retint_swapgs
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	SAVE_REST
 	movq $-1,ORIG_RAX(%rsp)
 	xorl %esi,%esi		# oldset
 	movq %rsp,%rdi		# &pt_regs
 	call do_notify_resume
@@ -881,324 +957,211 @@ ENTRY(retint_kernel)
 	jnc  retint_restore_args
 	call preempt_schedule_irq
 	jmp exit_intr
 #endif
 
 	CFI_ENDPROC
 END(common_interrupt)
 
 /*
  * APIC interrupts.
  */
-	.macro apicinterrupt num,func
+.macro apicinterrupt num sym do_sym
+ENTRY(\sym)
 	INTR_FRAME
 	pushq $~(\num)
 	CFI_ADJUST_CFA_OFFSET 8
-	interrupt \func
+	interrupt \do_sym
 	jmp ret_from_intr
 	CFI_ENDPROC
-	.endm
+END(\sym)
+.endm
 
-ENTRY(thermal_interrupt)
-	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
-END(thermal_interrupt)
-
-ENTRY(threshold_interrupt)
-	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
-END(threshold_interrupt)
-
-#ifdef CONFIG_SMP
-ENTRY(reschedule_interrupt)
-	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
-END(reschedule_interrupt)
-
-	.macro INVALIDATE_ENTRY num
-ENTRY(invalidate_interrupt\num)
-	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
-END(invalidate_interrupt\num)
-	.endm
-
-	INVALIDATE_ENTRY 0
-	INVALIDATE_ENTRY 1
-	INVALIDATE_ENTRY 2
-	INVALIDATE_ENTRY 3
-	INVALIDATE_ENTRY 4
-	INVALIDATE_ENTRY 5
-	INVALIDATE_ENTRY 6
-	INVALIDATE_ENTRY 7
-
-ENTRY(call_function_interrupt)
-	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
-END(call_function_interrupt)
-ENTRY(call_function_single_interrupt)
-	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
-END(call_function_single_interrupt)
-ENTRY(irq_move_cleanup_interrupt)
-	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
-END(irq_move_cleanup_interrupt)
+#ifdef CONFIG_SMP
+apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
+	irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
 #endif
 
-ENTRY(apic_timer_interrupt)
-	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
-END(apic_timer_interrupt)
+apicinterrupt UV_BAU_MESSAGE \
+	uv_bau_message_intr1 uv_bau_message_interrupt
+apicinterrupt LOCAL_TIMER_VECTOR \
+	apic_timer_interrupt smp_apic_timer_interrupt
+
+#ifdef CONFIG_SMP
+apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
+	invalidate_interrupt0 smp_invalidate_interrupt
+apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \
+	invalidate_interrupt1 smp_invalidate_interrupt
+apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \
+	invalidate_interrupt2 smp_invalidate_interrupt
+apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \
+	invalidate_interrupt3 smp_invalidate_interrupt
+apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \
+	invalidate_interrupt4 smp_invalidate_interrupt
+apicinterrupt INVALIDATE_TLB_VECTOR_START+5 \
+	invalidate_interrupt5 smp_invalidate_interrupt
+apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \
+	invalidate_interrupt6 smp_invalidate_interrupt
+apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \
+	invalidate_interrupt7 smp_invalidate_interrupt
+#endif
 
-ENTRY(uv_bau_message_intr1)
-	apicinterrupt 220,uv_bau_message_interrupt
-END(uv_bau_message_intr1)
+apicinterrupt THRESHOLD_APIC_VECTOR \
+	threshold_interrupt mce_threshold_interrupt
+apicinterrupt THERMAL_APIC_VECTOR \
+	thermal_interrupt smp_thermal_interrupt
+
+#ifdef CONFIG_SMP
+apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
+	call_function_single_interrupt smp_call_function_single_interrupt
+apicinterrupt CALL_FUNCTION_VECTOR \
+	call_function_interrupt smp_call_function_interrupt
+apicinterrupt RESCHEDULE_VECTOR \
+	reschedule_interrupt smp_reschedule_interrupt
+#endif
 
-ENTRY(error_interrupt)
-	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
-END(error_interrupt)
+apicinterrupt ERROR_APIC_VECTOR \
+	error_interrupt smp_error_interrupt
+apicinterrupt SPURIOUS_APIC_VECTOR \
+	spurious_interrupt smp_spurious_interrupt
 
-ENTRY(spurious_interrupt)
-	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
-END(spurious_interrupt)
-
 /*
  * Exception entry points.
  */
-	.macro zeroentry sym
-	INTR_FRAME
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	pushq $0	/* push error code/oldrax */
-	CFI_ADJUST_CFA_OFFSET 8
-	pushq %rax	/* push real oldrax to the rdi slot */
-	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET rax,0
-	leaq  \sym(%rip),%rax
-	jmp error_entry
-	CFI_ENDPROC
-	.endm
+.macro zeroentry sym do_sym
+ENTRY(\sym)
+	INTR_FRAME
+	PARAVIRT_ADJUST_EXCEPTION_FRAME
+	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
+	subq $15*8,%rsp
+	CFI_ADJUST_CFA_OFFSET 15*8
+	call error_entry
+	DEFAULT_FRAME 0
+	movq %rsp,%rdi		/* pt_regs pointer */
+	xorl %esi,%esi		/* no error code */
+	call \do_sym
+	jmp error_exit		/* %ebx: no swapgs flag */
+	CFI_ENDPROC
+END(\sym)
+.endm
 
-	.macro errorentry sym
-	XCPT_FRAME
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	pushq %rax
-	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET rax,0
-	leaq  \sym(%rip),%rax
-	jmp error_entry
-	CFI_ENDPROC
-	.endm
-
-	/* error code is on the stack already */
-	/* handle NMI like exceptions that can happen everywhere */
-	.macro paranoidentry sym, ist=0, irqtrace=1
-	SAVE_ALL
-	cld
-	movl $1,%ebx
-	movl  $MSR_GS_BASE,%ecx
-	rdmsr
-	testl %edx,%edx
-	js    1f
-	SWAPGS
-	xorl  %ebx,%ebx
-1:
-	.if \ist
-	movq	%gs:pda_data_offset, %rbp
-	.endif
-	.if \irqtrace
-	TRACE_IRQS_OFF
-	.endif
-	movq %rsp,%rdi
-	movq ORIG_RAX(%rsp),%rsi
-	movq $-1,ORIG_RAX(%rsp)
-	.if \ist
-	subq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
-	.endif
-	call \sym
-	.if \ist
-	addq	$EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
-	.endif
-	DISABLE_INTERRUPTS(CLBR_NONE)
-	.if \irqtrace
-	TRACE_IRQS_OFF
-	.endif
-	.endm
-
-	/*
-	 * "Paranoid" exit path from exception stack.
-	 * Paranoid because this is used by NMIs and cannot take
-	 * any kernel state for granted.
-	 * We don't do kernel preemption checks here, because only
-	 * NMI should be common and it does not enable IRQs and
-	 * cannot get reschedule ticks.
-	 *
-	 * "trace" is 0 for the NMI handler only, because irq-tracing
-	 * is fundamentally NMI-unsafe. (we cannot change the soft and
-	 * hard flags at once, atomically)
-	 */
-	.macro paranoidexit trace=1
-	/* ebx:	no swapgs flag */
-paranoid_exit\trace:
-	testl %ebx,%ebx			/* swapgs needed? */
-	jnz paranoid_restore\trace
-	testl $3,CS(%rsp)
-	jnz   paranoid_userspace\trace
-paranoid_swapgs\trace:
-	.if \trace
-	TRACE_IRQS_IRETQ 0
-	.endif
-	SWAPGS_UNSAFE_STACK
-paranoid_restore\trace:
-	RESTORE_ALL 8
-	jmp irq_return
-paranoid_userspace\trace:
-	GET_THREAD_INFO(%rcx)
-	movl TI_flags(%rcx),%ebx
-	andl $_TIF_WORK_MASK,%ebx
-	jz paranoid_swapgs\trace
-	movq %rsp,%rdi			/* &pt_regs */
-	call sync_regs
-	movq %rax,%rsp			/* switch stack for scheduling */
-	testl $_TIF_NEED_RESCHED,%ebx
-	jnz paranoid_schedule\trace
-	movl %ebx,%edx			/* arg3: thread flags */
-	.if \trace
-	TRACE_IRQS_ON
-	.endif
-	ENABLE_INTERRUPTS(CLBR_NONE)
-	xorl %esi,%esi			/* arg2: oldset */
-	movq %rsp,%rdi			/* arg1: &pt_regs */
-	call do_notify_resume
-	DISABLE_INTERRUPTS(CLBR_NONE)
-	.if \trace
-	TRACE_IRQS_OFF
-	.endif
-	jmp paranoid_userspace\trace
-paranoid_schedule\trace:
-	.if \trace
-	TRACE_IRQS_ON
-	.endif
-	ENABLE_INTERRUPTS(CLBR_ANY)
-	call schedule
-	DISABLE_INTERRUPTS(CLBR_ANY)
-	.if \trace
-	TRACE_IRQS_OFF
-	.endif
-	jmp paranoid_userspace\trace
-	CFI_ENDPROC
-	.endm
-
-/*
- * Exception entry point. This expects an error code/orig_rax on the stack
- * and the exception handler in %rax.
- */
-KPROBE_ENTRY(error_entry)
-	_frame RDI
-	CFI_REL_OFFSET rax,0
-	/* rdi slot contains rax, oldrax contains error code */
-	cld
-	subq  $14*8,%rsp
-	CFI_ADJUST_CFA_OFFSET	(14*8)
-	movq %rsi,13*8(%rsp)
-	CFI_REL_OFFSET	rsi,RSI
-	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
-	CFI_REGISTER	rax,rsi
-	movq %rdx,12*8(%rsp)
-	CFI_REL_OFFSET	rdx,RDX
-	movq %rcx,11*8(%rsp)
-	CFI_REL_OFFSET	rcx,RCX
-	movq %rsi,10*8(%rsp)	/* store rax */
-	CFI_REL_OFFSET	rax,RAX
-	movq %r8, 9*8(%rsp)
-	CFI_REL_OFFSET	r8,R8
-	movq %r9, 8*8(%rsp)
-	CFI_REL_OFFSET	r9,R9
-	movq %r10,7*8(%rsp)
-	CFI_REL_OFFSET	r10,R10
-	movq %r11,6*8(%rsp)
-	CFI_REL_OFFSET	r11,R11
-	movq %rbx,5*8(%rsp)
-	CFI_REL_OFFSET	rbx,RBX
-	movq %rbp,4*8(%rsp)
-	CFI_REL_OFFSET	rbp,RBP
-	movq %r12,3*8(%rsp)
-	CFI_REL_OFFSET	r12,R12
-	movq %r13,2*8(%rsp)
-	CFI_REL_OFFSET	r13,R13
-	movq %r14,1*8(%rsp)
-	CFI_REL_OFFSET	r14,R14
-	movq %r15,(%rsp)
-	CFI_REL_OFFSET	r15,R15
-	xorl %ebx,%ebx
-	testl $3,CS(%rsp)
-	je  error_kernelspace
-error_swapgs:
-	SWAPGS
-error_sti:
-	TRACE_IRQS_OFF
-	movq %rdi,RDI(%rsp)
-	CFI_REL_OFFSET	rdi,RDI
-	movq %rsp,%rdi
-	movq ORIG_RAX(%rsp),%rsi	/* get error code */
-	movq $-1,ORIG_RAX(%rsp)
-	call *%rax
-	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
-error_exit:
-	movl %ebx,%eax
-	RESTORE_REST
-	DISABLE_INTERRUPTS(CLBR_NONE)
-	TRACE_IRQS_OFF
-	GET_THREAD_INFO(%rcx)
-	testl %eax,%eax
-	jne  retint_kernel
-	LOCKDEP_SYS_EXIT_IRQ
-	movl  TI_flags(%rcx),%edx
-	movl  $_TIF_WORK_MASK,%edi
-	andl  %edi,%edx
-	jnz  retint_careful
-	jmp retint_swapgs
-	CFI_ENDPROC
-
-error_kernelspace:
-	incl %ebx
-	/* There are two places in the kernel that can potentially fault with
-	   usergs. Handle them here. The exception handlers after
-	   iret run with kernel gs again, so don't set the user space flag.
-	   B stepping K8s sometimes report an truncated RIP for IRET
-	   exceptions returning to compat mode. Check for these here too. */
-	leaq irq_return(%rip),%rcx
-	cmpq %rcx,RIP(%rsp)
-	je   error_swapgs
-	movl %ecx,%ecx	/* zero extend */
-	cmpq %rcx,RIP(%rsp)
-	je   error_swapgs
-	cmpq $gs_change,RIP(%rsp)
-	je   error_swapgs
-	jmp error_sti
-KPROBE_END(error_entry)
-
-	/* Reload gs selector with exception handling */
-	/* edi:	new selector */
+.macro paranoidzeroentry sym do_sym
+ENTRY(\sym)
+	INTR_FRAME
+	PARAVIRT_ADJUST_EXCEPTION_FRAME
+	pushq $-1		/* ORIG_RAX: no syscall to restart */
+	CFI_ADJUST_CFA_OFFSET 8
+	subq $15*8, %rsp
+	call save_paranoid
+	TRACE_IRQS_OFF
+	movq %rsp,%rdi		/* pt_regs pointer */
+	xorl %esi,%esi		/* no error code */
+	call \do_sym
+	jmp paranoid_exit	/* %ebx: no swapgs flag */
+	CFI_ENDPROC
+END(\sym)
+.endm
+
+.macro paranoidzeroentry_ist sym do_sym ist
+ENTRY(\sym)
+	INTR_FRAME
+	PARAVIRT_ADJUST_EXCEPTION_FRAME
+	pushq $-1		/* ORIG_RAX: no syscall to restart */
+	CFI_ADJUST_CFA_OFFSET 8
+	subq $15*8, %rsp
+	call save_paranoid
+	TRACE_IRQS_OFF
+	movq %rsp,%rdi		/* pt_regs pointer */
+	xorl %esi,%esi		/* no error code */
+	movq %gs:pda_data_offset, %rbp
+	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+	call \do_sym
+	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+	jmp paranoid_exit	/* %ebx: no swapgs flag */
+	CFI_ENDPROC
+END(\sym)
+.endm
+
+.macro errorentry sym do_sym
+ENTRY(\sym)
+	XCPT_FRAME
+	PARAVIRT_ADJUST_EXCEPTION_FRAME
+	subq $15*8,%rsp
+	CFI_ADJUST_CFA_OFFSET 15*8
+	call error_entry
+	DEFAULT_FRAME 0
+	movq %rsp,%rdi			/* pt_regs pointer */
+	movq ORIG_RAX(%rsp),%rsi	/* get error code */
+	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
+	call \do_sym
+	jmp error_exit			/* %ebx: no swapgs flag */
+	CFI_ENDPROC
+END(\sym)
+.endm
+
+	/* error code is on the stack already */
+.macro paranoiderrorentry sym do_sym
+ENTRY(\sym)
+	XCPT_FRAME
+	PARAVIRT_ADJUST_EXCEPTION_FRAME
+	subq $15*8,%rsp
+	CFI_ADJUST_CFA_OFFSET 15*8
+	call save_paranoid
+	DEFAULT_FRAME 0
+	TRACE_IRQS_OFF
+	movq %rsp,%rdi			/* pt_regs pointer */
+	movq ORIG_RAX(%rsp),%rsi	/* get error code */
+	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
+	call \do_sym
+	jmp paranoid_exit		/* %ebx: no swapgs flag */
+	CFI_ENDPROC
+END(\sym)
+.endm
+
+zeroentry divide_error do_divide_error
+zeroentry overflow do_overflow
+zeroentry bounds do_bounds
+zeroentry invalid_op do_invalid_op
+zeroentry device_not_available do_device_not_available
+paranoiderrorentry double_fault do_double_fault
+zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
+errorentry invalid_TSS do_invalid_TSS
+errorentry segment_not_present do_segment_not_present
+zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
+zeroentry coprocessor_error do_coprocessor_error
+errorentry alignment_check do_alignment_check
+zeroentry simd_coprocessor_error do_simd_coprocessor_error
+
+	/* Reload gs selector with exception handling */
+	/* edi:	new selector */
 ENTRY(native_load_gs_index)
 	CFI_STARTPROC
 	pushf
 	CFI_ADJUST_CFA_OFFSET 8
 	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
 	SWAPGS
 gs_change:
 	movl %edi,%gs
 2:	mfence		/* workaround */
 	SWAPGS
 	popf
 	CFI_ADJUST_CFA_OFFSET -8
 	ret
 	CFI_ENDPROC
-ENDPROC(native_load_gs_index)
+END(native_load_gs_index)
 
 	.section __ex_table,"a"
 	.align 8
 	.quad gs_change,bad_gs
 	.previous
 	.section .fixup,"ax"
 	/* running with kernelgs */
 bad_gs:
 	SWAPGS			/* switch back to user gs */
 	xorl %eax,%eax
 	movl %eax,%gs
 	jmp  2b
 	.previous
 
 /*
  * Create a kernel thread.
  *
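[Review note: with the new entry macros every exception stub becomes a one-line instantiation. Substituting into the macro body, zeroentry divide_error do_divide_error is assumed to expand to (sketch):

ENTRY(divide_error)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq_cfi $-1			/* ORIG_RAX: no syscall to restart */
	subq $15*8,%rsp
	CFI_ADJUST_CFA_OFFSET 15*8
	call error_entry
	DEFAULT_FRAME 0
	movq %rsp,%rdi			/* pt_regs pointer */
	xorl %esi,%esi			/* no error code */
	call do_divide_error
	jmp error_exit			/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(divide_error)

The four variants differ only in whether an error code is already on the stack (errorentry vs zeroentry) and whether entry/exit must be NMI-safe via save_paranoid/paranoid_exit (the paranoid* variants).]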
@@ -1221,7 +1184,7 @@ ENTRY(kernel_thread)
 
 	xorl %r8d,%r8d
 	xorl %r9d,%r9d
 
 	# clone now
 	call do_fork
 	movq %rax,RAX(%rsp)
@@ -1232,15 +1195,15 @@ ENTRY(kernel_thread)
  * so internally to the x86_64 port you can rely on kernel_thread()
  * not to reschedule the child before returning, this avoids the need
  * of hacks for example to fork off the per-CPU idle tasks.
  * [Hopefully no generic code relies on the reschedule -AK]
  */
 	RESTORE_ALL
 	UNFAKE_STACK_FRAME
 	ret
 	CFI_ENDPROC
-ENDPROC(kernel_thread)
+END(kernel_thread)
 
-child_rip:
+ENTRY(child_rip)
 	pushq $0		# fake return address
 	CFI_STARTPROC
 	/*
@@ -1253,8 +1216,9 @@ child_rip:
 	# exit
 	mov %eax, %edi
 	call do_exit
+	ud2			# padding for call trace
 	CFI_ENDPROC
-ENDPROC(child_rip)
+END(child_rip)
 
 /*
  * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
@@ -1274,10 +1238,10 @@ ENDPROC(child_rip)
 ENTRY(kernel_execve)
 	CFI_STARTPROC
 	FAKE_STACK_FRAME $0
 	SAVE_ALL
 	movq %rsp,%rcx
 	call sys_execve
 	movq %rax, RAX(%rsp)
 	RESTORE_REST
 	testq %rax,%rax
 	je int_ret_from_sys_call
@@ -1285,129 +1249,7 @@ ENTRY(kernel_execve)
 	UNFAKE_STACK_FRAME
 	ret
 	CFI_ENDPROC
-ENDPROC(kernel_execve)
-
-KPROBE_ENTRY(page_fault)
-	errorentry do_page_fault
-KPROBE_END(page_fault)
-
-ENTRY(coprocessor_error)
-	zeroentry do_coprocessor_error
-END(coprocessor_error)
-
-ENTRY(simd_coprocessor_error)
-	zeroentry do_simd_coprocessor_error
-END(simd_coprocessor_error)
-
-ENTRY(device_not_available)
-	zeroentry do_device_not_available
-END(device_not_available)
-
-	/* runs on exception stack */
-KPROBE_ENTRY(debug)
-	INTR_FRAME
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	pushq $0
-	CFI_ADJUST_CFA_OFFSET 8
-	paranoidentry do_debug, DEBUG_STACK
-	paranoidexit
-KPROBE_END(debug)
-
-	/* runs on exception stack */
-KPROBE_ENTRY(nmi)
-	INTR_FRAME
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	pushq $-1
-	CFI_ADJUST_CFA_OFFSET 8
-	paranoidentry do_nmi, 0, 0
-#ifdef CONFIG_TRACE_IRQFLAGS
-	paranoidexit 0
-#else
-	jmp paranoid_exit1
-	CFI_ENDPROC
-#endif
-KPROBE_END(nmi)
-
-KPROBE_ENTRY(int3)
-	INTR_FRAME
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	pushq $0
-	CFI_ADJUST_CFA_OFFSET 8
-	paranoidentry do_int3, DEBUG_STACK
-	jmp paranoid_exit1
-	CFI_ENDPROC
-KPROBE_END(int3)
-
-ENTRY(overflow)
-	zeroentry do_overflow
-END(overflow)
-
-ENTRY(bounds)
-	zeroentry do_bounds
-END(bounds)
-
-ENTRY(invalid_op)
-	zeroentry do_invalid_op
-END(invalid_op)
-
-ENTRY(coprocessor_segment_overrun)
-	zeroentry do_coprocessor_segment_overrun
-END(coprocessor_segment_overrun)
-
-	/* runs on exception stack */
-ENTRY(double_fault)
-	XCPT_FRAME
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	paranoidentry do_double_fault
-	jmp paranoid_exit1
-	CFI_ENDPROC
-END(double_fault)
-
-ENTRY(invalid_TSS)
-	errorentry do_invalid_TSS
-END(invalid_TSS)
-
-ENTRY(segment_not_present)
-	errorentry do_segment_not_present
-END(segment_not_present)
-
-	/* runs on exception stack */
-ENTRY(stack_segment)
-	XCPT_FRAME
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	paranoidentry do_stack_segment
-	jmp paranoid_exit1
-	CFI_ENDPROC
-END(stack_segment)
-
-KPROBE_ENTRY(general_protection)
-	errorentry do_general_protection
-KPROBE_END(general_protection)
-
-ENTRY(alignment_check)
-	errorentry do_alignment_check
-END(alignment_check)
-
-ENTRY(divide_error)
-	zeroentry do_divide_error
-END(divide_error)
-
-ENTRY(spurious_interrupt_bug)
-	zeroentry do_spurious_interrupt_bug
-END(spurious_interrupt_bug)
-
-#ifdef CONFIG_X86_MCE
-	/* runs on exception stack */
-ENTRY(machine_check)
-	INTR_FRAME
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	pushq $0
-	CFI_ADJUST_CFA_OFFSET 8
-	paranoidentry do_machine_check
-	jmp paranoid_exit1
-	CFI_ENDPROC
-END(machine_check)
-#endif
+END(kernel_execve)
 
 /* Call softirq on interrupt stack. Interrupts are off. */
 ENTRY(call_softirq)
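[Review note: the removed per-vector stubs reappear near the end of the patch as macro instantiations inside the .kprobes.text pushsection. E.g. paranoidzeroentry_ist debug do_debug DEBUG_STACK is assumed to expand to (sketch):

ENTRY(debug)
	INTR_FRAME
	PARAVIRT_ADJUST_EXCEPTION_FRAME
	pushq $-1			/* ORIG_RAX: no syscall to restart */
	CFI_ADJUST_CFA_OFFSET 8
	subq $15*8, %rsp
	call save_paranoid
	TRACE_IRQS_OFF
	movq %rsp,%rdi			/* pt_regs pointer */
	xorl %esi,%esi			/* no error code */
	movq %gs:pda_data_offset, %rbp
	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (DEBUG_STACK - 1) * 8(%rbp)
	call do_debug
	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (DEBUG_STACK - 1) * 8(%rbp)
	jmp paranoid_exit		/* %ebx: no swapgs flag */
	CFI_ENDPROC
END(debug)

which matches the old paranoidentry do_debug, DEBUG_STACK behaviour, including the IST stack adjustment around the handler call.]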
@@ -1427,40 +1269,33 @@ ENTRY(call_softirq)
 	decl %gs:pda_irqcount
 	ret
 	CFI_ENDPROC
-ENDPROC(call_softirq)
-
-KPROBE_ENTRY(ignore_sysret)
-	CFI_STARTPROC
-	mov $-ENOSYS,%eax
-	sysret
-	CFI_ENDPROC
-ENDPROC(ignore_sysret)
+END(call_softirq)
 
 #ifdef CONFIG_XEN
-ENTRY(xen_hypervisor_callback)
-	zeroentry xen_do_hypervisor_callback
-END(xen_hypervisor_callback)
+zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
 
 /*
-# A note on the "critical region" in our callback handler.
-# We want to avoid stacking callback handlers due to events occurring
-# during handling of the last event. To do this, we keep events disabled
-# until we've done all processing. HOWEVER, we must enable events before
-# popping the stack frame (can't be done atomically) and so it would still
-# be possible to get enough handler activations to overflow the stack.
-# Although unlikely, bugs of that kind are hard to track down, so we'd
-# like to avoid the possibility.
-# So, on entry to the handler we detect whether we interrupted an
-# existing activation in its critical region -- if so, we pop the current
-# activation and restart the handler using the previous one.
-*/
+ * A note on the "critical region" in our callback handler.
+ * We want to avoid stacking callback handlers due to events occurring
+ * during handling of the last event. To do this, we keep events disabled
+ * until we've done all processing. HOWEVER, we must enable events before
+ * popping the stack frame (can't be done atomically) and so it would still
+ * be possible to get enough handler activations to overflow the stack.
+ * Although unlikely, bugs of that kind are hard to track down, so we'd
+ * like to avoid the possibility.
+ * So, on entry to the handler we detect whether we interrupted an
+ * existing activation in its critical region -- if so, we pop the current
+ * activation and restart the handler using the previous one.
+ */
 ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
 	CFI_STARTPROC
-/* Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
-   see the correct pointer to the pt_regs */
+/*
+ * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
+ * see the correct pointer to the pt_regs
+ */
 	movq %rdi, %rsp            # we don't return, adjust the stack frame
 	CFI_ENDPROC
-	CFI_DEFAULT_STACK
+	DEFAULT_FRAME
 11:	incl %gs:pda_irqcount
 	movq %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
@@ -1475,23 +1310,26 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
 END(do_hypervisor_callback)
 
 /*
-# Hypervisor uses this for application faults while it executes.
-# We get here for two reasons:
-#  1. Fault while reloading DS, ES, FS or GS
-#  2. Fault while executing IRET
-# Category 1 we do not need to fix up as Xen has already reloaded all segment
-# registers that could be reloaded and zeroed the others.
-# Category 2 we fix up by killing the current process. We cannot use the
-# normal Linux return path in this case because if we use the IRET hypercall
-# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
-# We distinguish between categories by comparing each saved segment register
-# with its current contents: any discrepancy means we in category 1.
-*/
+ * Hypervisor uses this for application faults while it executes.
+ * We get here for two reasons:
+ *  1. Fault while reloading DS, ES, FS or GS
+ *  2. Fault while executing IRET
+ * Category 1 we do not need to fix up as Xen has already reloaded all segment
+ * registers that could be reloaded and zeroed the others.
+ * Category 2 we fix up by killing the current process. We cannot use the
+ * normal Linux return path in this case because if we use the IRET hypercall
+ * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
+ * We distinguish between categories by comparing each saved segment register
+ * with its current contents: any discrepancy means we in category 1.
+ */
 ENTRY(xen_failsafe_callback)
-	framesz = (RIP-0x30)	/* workaround buggy gas */
-	_frame framesz
-	CFI_REL_OFFSET rcx, 0
-	CFI_REL_OFFSET r11, 8
+	INTR_FRAME 1 (6*8)
+	/*CFI_REL_OFFSET gs,GS*/
+	/*CFI_REL_OFFSET fs,FS*/
+	/*CFI_REL_OFFSET es,ES*/
+	/*CFI_REL_OFFSET ds,DS*/
+	CFI_REL_OFFSET r11,8
+	CFI_REL_OFFSET rcx,0
 	movw %ds,%cx
 	cmpw %cx,0x10(%rsp)
 	CFI_REMEMBER_STATE
@@ -1512,12 +1350,9 @@ ENTRY(xen_failsafe_callback)
 	CFI_RESTORE r11
 	addq $0x30,%rsp
 	CFI_ADJUST_CFA_OFFSET -0x30
-	pushq $0
-	CFI_ADJUST_CFA_OFFSET 8
-	pushq %r11
-	CFI_ADJUST_CFA_OFFSET 8
-	pushq %rcx
-	CFI_ADJUST_CFA_OFFSET 8
+	pushq_cfi $0	/* RIP */
+	pushq_cfi %r11
+	pushq_cfi %rcx
 	jmp general_protection
 	CFI_RESTORE_STATE
 1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
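[Review note: pushq_cfi/popq_cfi are assumed to be small dwarf2.h helpers that fold the unwind bookkeeping into the push/pop, roughly:

	.macro pushq_cfi reg
	pushq \reg
	CFI_ADJUST_CFA_OFFSET 8
	.endm

which is why each pushq/CFI_ADJUST_CFA_OFFSET pair above collapses to a single line.]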
@@ -1527,11 +1362,223 @@ ENTRY(xen_failsafe_callback)
 	CFI_RESTORE r11
 	addq $0x30,%rsp
 	CFI_ADJUST_CFA_OFFSET -0x30
-	pushq $0
-	CFI_ADJUST_CFA_OFFSET 8
+	pushq_cfi $0
 	SAVE_ALL
 	jmp error_exit
 	CFI_ENDPROC
 END(xen_failsafe_callback)
 
 #endif /* CONFIG_XEN */
+
+/*
+ * Some functions should be protected against kprobes
+ */
+	.pushsection .kprobes.text, "ax"
+
+paranoidzeroentry_ist debug do_debug DEBUG_STACK
+paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
+paranoiderrorentry stack_segment do_stack_segment
+errorentry general_protection do_general_protection
+errorentry page_fault do_page_fault
+#ifdef CONFIG_X86_MCE
+paranoidzeroentry machine_check do_machine_check
+#endif
+
+	/*
+	 * "Paranoid" exit path from exception stack.
+	 * Paranoid because this is used by NMIs and cannot take
+	 * any kernel state for granted.
+	 * We don't do kernel preemption checks here, because only
+	 * NMI should be common and it does not enable IRQs and
+	 * cannot get reschedule ticks.
+	 *
+	 * "trace" is 0 for the NMI handler only, because irq-tracing
+	 * is fundamentally NMI-unsafe. (we cannot change the soft and
+	 * hard flags at once, atomically)
+	 */
+
+	/* ebx:	no swapgs flag */
+ENTRY(paranoid_exit)
+	INTR_FRAME
+	DISABLE_INTERRUPTS(CLBR_NONE)
+	TRACE_IRQS_OFF
+	testl %ebx,%ebx				/* swapgs needed? */
+	jnz paranoid_restore
+	testl $3,CS(%rsp)
+	jnz   paranoid_userspace
+paranoid_swapgs:
+	TRACE_IRQS_IRETQ 0
+	SWAPGS_UNSAFE_STACK
+paranoid_restore:
+	RESTORE_ALL 8
+	jmp irq_return
+paranoid_userspace:
+	GET_THREAD_INFO(%rcx)
+	movl TI_flags(%rcx),%ebx
+	andl $_TIF_WORK_MASK,%ebx
+	jz paranoid_swapgs
+	movq %rsp,%rdi			/* &pt_regs */
+	call sync_regs
+	movq %rax,%rsp			/* switch stack for scheduling */
+	testl $_TIF_NEED_RESCHED,%ebx
+	jnz paranoid_schedule
+	movl %ebx,%edx			/* arg3: thread flags */
+	TRACE_IRQS_ON
+	ENABLE_INTERRUPTS(CLBR_NONE)
+	xorl %esi,%esi			/* arg2: oldset */
+	movq %rsp,%rdi			/* arg1: &pt_regs */
+	call do_notify_resume
+	DISABLE_INTERRUPTS(CLBR_NONE)
+	TRACE_IRQS_OFF
+	jmp paranoid_userspace
+paranoid_schedule:
+	TRACE_IRQS_ON
+	ENABLE_INTERRUPTS(CLBR_ANY)
+	call schedule
+	DISABLE_INTERRUPTS(CLBR_ANY)
+	TRACE_IRQS_OFF
+	jmp paranoid_userspace
+	CFI_ENDPROC
+END(paranoid_exit)
+
+/*
+ * Exception entry point. This expects an error code/orig_rax on the stack.
+ * returns in "no swapgs flag" in %ebx.
+ */
+ENTRY(error_entry)
+	XCPT_FRAME
+	CFI_ADJUST_CFA_OFFSET 15*8
+	/* oldrax contains error code */
+	cld
+	movq_cfi rdi, RDI+8
+	movq_cfi rsi, RSI+8
+	movq_cfi rdx, RDX+8
+	movq_cfi rcx, RCX+8
+	movq_cfi rax, RAX+8
+	movq_cfi r8, R8+8
+	movq_cfi r9, R9+8
+	movq_cfi r10, R10+8
+	movq_cfi r11, R11+8
+	movq_cfi rbx, RBX+8
+	movq_cfi rbp, RBP+8
+	movq_cfi r12, R12+8
+	movq_cfi r13, R13+8
+	movq_cfi r14, R14+8
+	movq_cfi r15, R15+8
+	xorl %ebx,%ebx
+	testl $3,CS+8(%rsp)
+	je error_kernelspace
+error_swapgs:
+	SWAPGS
+error_sti:
+	TRACE_IRQS_OFF
+	ret
+	CFI_ENDPROC
+
+/*
+ * There are two places in the kernel that can potentially fault with
+ * usergs. Handle them here. The exception handlers after iret run with
+ * kernel gs again, so don't set the user space flag. B stepping K8s
+ * sometimes report an truncated RIP for IRET exceptions returning to
+ * compat mode. Check for these here too.
+ */
+error_kernelspace:
+	incl %ebx
+	leaq irq_return(%rip),%rcx
+	cmpq %rcx,RIP+8(%rsp)
+	je error_swapgs
+	movl %ecx,%ecx	/* zero extend */
+	cmpq %rcx,RIP+8(%rsp)
+	je error_swapgs
+	cmpq $gs_change,RIP+8(%rsp)
+	je error_swapgs
+	jmp error_sti
+END(error_entry)
+
+
+/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
+ENTRY(error_exit)
+	DEFAULT_FRAME
+	movl %ebx,%eax
+	RESTORE_REST
+	DISABLE_INTERRUPTS(CLBR_NONE)
+	TRACE_IRQS_OFF
+	GET_THREAD_INFO(%rcx)
+	testl %eax,%eax
+	jne retint_kernel
+	LOCKDEP_SYS_EXIT_IRQ
+	movl TI_flags(%rcx),%edx
+	movl $_TIF_WORK_MASK,%edi
+	andl %edi,%edx
+	jnz retint_careful
+	jmp retint_swapgs
+	CFI_ENDPROC
+END(error_exit)
+
+
+	/* runs on exception stack */
+ENTRY(nmi)
+	INTR_FRAME
+	PARAVIRT_ADJUST_EXCEPTION_FRAME
+	pushq_cfi $-1
+	subq $15*8, %rsp
+	CFI_ADJUST_CFA_OFFSET 15*8
+	call save_paranoid
+	DEFAULT_FRAME 0
+	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+	movq %rsp,%rdi
+	movq $-1,%rsi
+	call do_nmi
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* paranoidexit; without TRACE_IRQS_OFF */
+	/* ebx:	no swapgs flag */
+	DISABLE_INTERRUPTS(CLBR_NONE)
+	testl %ebx,%ebx				/* swapgs needed? */
+	jnz nmi_restore
+	testl $3,CS(%rsp)
+	jnz nmi_userspace
+nmi_swapgs:
+	SWAPGS_UNSAFE_STACK
+nmi_restore:
+	RESTORE_ALL 8
+	jmp irq_return
+nmi_userspace:
+	GET_THREAD_INFO(%rcx)
+	movl TI_flags(%rcx),%ebx
+	andl $_TIF_WORK_MASK,%ebx
+	jz nmi_swapgs
+	movq %rsp,%rdi			/* &pt_regs */
+	call sync_regs
+	movq %rax,%rsp			/* switch stack for scheduling */
+	testl $_TIF_NEED_RESCHED,%ebx
+	jnz nmi_schedule
+	movl %ebx,%edx			/* arg3: thread flags */
+	ENABLE_INTERRUPTS(CLBR_NONE)
+	xorl %esi,%esi			/* arg2: oldset */
+	movq %rsp,%rdi			/* arg1: &pt_regs */
+	call do_notify_resume
+	DISABLE_INTERRUPTS(CLBR_NONE)
+	jmp nmi_userspace
+nmi_schedule:
+	ENABLE_INTERRUPTS(CLBR_ANY)
+	call schedule
+	DISABLE_INTERRUPTS(CLBR_ANY)
+	jmp nmi_userspace
+	CFI_ENDPROC
+#else
+	jmp paranoid_exit
+	CFI_ENDPROC
+#endif
+END(nmi)
+
+ENTRY(ignore_sysret)
+	CFI_STARTPROC
+	mov $-ENOSYS,%eax
+	sysret
+	CFI_ENDPROC
+END(ignore_sysret)
+
+/*
+ * End of kprobes section
+ */
+	.popsection
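[Review note: the %ebx protocol that ties these pieces together, as established by save_paranoid and error_entry above: %ebx = 1 means gs was already the kernel's, so the exit path must not SWAPGS; %ebx = 0 means SWAPGS was done on entry and must be undone. paranoid_exit, error_exit and the open-coded NMI tail all branch on it the same way:

	testl %ebx,%ebx			/* swapgs needed? */
	jnz paranoid_restore		/* 1: skip SWAPGS */
]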