Diffstat (limited to 'arch/x86/kernel/entry_64.S')
-rw-r--r--	arch/x86/kernel/entry_64.S	1458
1 file changed, 794 insertions, 664 deletions
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index b86f332c96a6..e28c7a987793 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -11,15 +11,15 @@
  *
  * NOTE: This code handles signal-recognition, which happens every time
  * after an interrupt and after each system call.
  *
  * Normal syscalls and interrupts don't save a full stack frame, this is
  * only done for syscall tracing, signals or fork/exec et.al.
  *
  * A note on terminology:
  * - top of stack: Architecture defined interrupt frame from SS to RIP
  * at the top of the kernel process stack.
  * - partial stack frame: partially saved registers upto R11.
  * - full stack frame: Like partial stack frame, but all register saved.
  *
  * Some macro usage:
  * - CFI macros are used to generate dwarf2 unwind information for better
@@ -60,7 +60,6 @@
 #define __AUDIT_ARCH_LE	0x40000000

 	.code64
-
 #ifdef CONFIG_FUNCTION_TRACER
 #ifdef CONFIG_DYNAMIC_FTRACE
 ENTRY(mcount)
@@ -68,16 +67,10 @@ ENTRY(mcount)
 END(mcount)

 ENTRY(ftrace_caller)
+	cmpl $0, function_trace_stop
+	jne ftrace_stub

-	/* taken from glibc */
-	subq $0x38, %rsp
-	movq %rax, (%rsp)
-	movq %rcx, 8(%rsp)
-	movq %rdx, 16(%rsp)
-	movq %rsi, 24(%rsp)
-	movq %rdi, 32(%rsp)
-	movq %r8, 40(%rsp)
-	movq %r9, 48(%rsp)
+	MCOUNT_SAVE_FRAME

 	movq 0x38(%rsp), %rdi
 	movq 8(%rbp), %rsi
@@ -87,14 +80,13 @@ ENTRY(ftrace_caller)
 ftrace_call:
 	call ftrace_stub

-	movq 48(%rsp), %r9
-	movq 40(%rsp), %r8
-	movq 32(%rsp), %rdi
-	movq 24(%rsp), %rsi
-	movq 16(%rsp), %rdx
-	movq 8(%rsp), %rcx
-	movq (%rsp), %rax
-	addq $0x38, %rsp
+	MCOUNT_RESTORE_FRAME
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+	jmp ftrace_stub
+#endif

 .globl ftrace_stub
 ftrace_stub:
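A note for readers tracking the mcount changes above: the open-coded block that MCOUNT_SAVE_FRAME/MCOUNT_RESTORE_FRAME replace preserves exactly the caller-saved registers that may hold the traced function's arguments and return value, so they survive the call into the tracer. A rough C picture of the 0x38-byte save area (an illustrative sketch only, not a structure the kernel defines):

/* Illustrative sketch of the 0x38-byte area the old open-coded sequence
 * carved out with "subq $0x38, %rsp"; MCOUNT_SAVE_FRAME now emits the
 * equivalent stores. */
struct mcount_save_area {
	unsigned long rax;	/*   (%rsp) */
	unsigned long rcx;	/*  8(%rsp) */
	unsigned long rdx;	/* 16(%rsp) */
	unsigned long rsi;	/* 24(%rsp) */
	unsigned long rdi;	/* 32(%rsp) */
	unsigned long r8;	/* 40(%rsp) */
	unsigned long r9;	/* 48(%rsp) */
};	/* sizeof == 0x38 */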
@@ -103,15 +95,63 @@ END(ftrace_caller)

 #else /* ! CONFIG_DYNAMIC_FTRACE */
 ENTRY(mcount)
+	cmpl $0, function_trace_stop
+	jne ftrace_stub
+
 	cmpq $ftrace_stub, ftrace_trace_function
 	jnz trace
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	cmpq $ftrace_stub, ftrace_graph_return
+	jnz ftrace_graph_caller
+
+	cmpq $ftrace_graph_entry_stub, ftrace_graph_entry
+	jnz ftrace_graph_caller
+#endif
+
 .globl ftrace_stub
 ftrace_stub:
 	retq

 trace:
-	/* taken from glibc */
-	subq $0x38, %rsp
+	MCOUNT_SAVE_FRAME
+
+	movq 0x38(%rsp), %rdi
+	movq 8(%rbp), %rsi
+	subq $MCOUNT_INSN_SIZE, %rdi
+
+	call *ftrace_trace_function
+
+	MCOUNT_RESTORE_FRAME
+
+	jmp ftrace_stub
+END(mcount)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+	cmpl $0, function_trace_stop
+	jne ftrace_stub
+
+	MCOUNT_SAVE_FRAME
+
+	leaq 8(%rbp), %rdi
+	movq 0x38(%rsp), %rsi
+	subq $MCOUNT_INSN_SIZE, %rsi
+
+	call prepare_ftrace_return
+
+	MCOUNT_RESTORE_FRAME
+
+	retq
+END(ftrace_graph_caller)
+
+
+.globl return_to_handler
+return_to_handler:
+	subq $80, %rsp
+
 	movq %rax, (%rsp)
 	movq %rcx, 8(%rsp)
 	movq %rdx, 16(%rsp)
@@ -119,13 +159,14 @@ trace:
 	movq %rdi, 32(%rsp)
 	movq %r8, 40(%rsp)
 	movq %r9, 48(%rsp)
+	movq %r10, 56(%rsp)
+	movq %r11, 64(%rsp)

-	movq 0x38(%rsp), %rdi
-	movq 8(%rbp), %rsi
-	subq $MCOUNT_INSN_SIZE, %rdi
-
-	call *ftrace_trace_function
+	call ftrace_return_to_handler

+	movq %rax, 72(%rsp)
+	movq 64(%rsp), %r11
+	movq 56(%rsp), %r10
 	movq 48(%rsp), %r9
 	movq 40(%rsp), %r8
 	movq 32(%rsp), %rdi
@@ -133,16 +174,14 @@ trace:
 	movq 16(%rsp), %rdx
 	movq 8(%rsp), %rcx
 	movq (%rsp), %rax
-	addq $0x38, %rsp
+	addq $72, %rsp
+	retq
+#endif

-	jmp ftrace_stub
-END(mcount)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FUNCTION_TRACER */

 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
 #endif

 #ifdef CONFIG_PARAVIRT
 ENTRY(native_usergs_sysret64)
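The return_to_handler trampoline added above spills nine registers into an 80-byte area, asks ftrace_return_to_handler for the original return address, stores it in the one remaining slot, and then uses addq $72 so the final retq consumes exactly that slot. A small C check of the arithmetic (a sketch; the constants are read off the assembly above):

#include <assert.h>

int main(void)
{
	int nregs = 9;			/* rax,rcx,rdx,rsi,rdi,r8,r9,r10,r11 */
	int area = 80;			/* subq $80, %rsp */
	int ret_slot = nregs * 8;	/* movq %rax, 72(%rsp) target */
	assert(ret_slot == 72);
	/* addq $72 leaves only the return-address slot on the stack,
	 * which retq then pops and jumps through. */
	assert(area - ret_slot == 8);
	return 0;
}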
@@ -161,29 +200,29 @@ ENTRY(native_usergs_sysret64)
 .endm

 /*
  * C code is not supposed to know about undefined top of stack. Every time
  * a C function with an pt_regs argument is called from the SYSCALL based
  * fast path FIXUP_TOP_OF_STACK is needed.
  * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
  * manipulation.
  */

 	/* %rsp:at FRAMEEND */
-	.macro FIXUP_TOP_OF_STACK tmp
+	.macro FIXUP_TOP_OF_STACK tmp offset=0
 	movq %gs:pda_oldrsp,\tmp
-	movq \tmp,RSP(%rsp)
-	movq $__USER_DS,SS(%rsp)
-	movq $__USER_CS,CS(%rsp)
-	movq $-1,RCX(%rsp)
-	movq R11(%rsp),\tmp /* get eflags */
-	movq \tmp,EFLAGS(%rsp)
+	movq \tmp,RSP+\offset(%rsp)
+	movq $__USER_DS,SS+\offset(%rsp)
+	movq $__USER_CS,CS+\offset(%rsp)
+	movq $-1,RCX+\offset(%rsp)
+	movq R11+\offset(%rsp),\tmp /* get eflags */
+	movq \tmp,EFLAGS+\offset(%rsp)
 	.endm

-	.macro RESTORE_TOP_OF_STACK tmp,offset=0
-	movq RSP-\offset(%rsp),\tmp
+	.macro RESTORE_TOP_OF_STACK tmp offset=0
+	movq RSP+\offset(%rsp),\tmp
 	movq \tmp,%gs:pda_oldrsp
-	movq EFLAGS-\offset(%rsp),\tmp
-	movq \tmp,R11-\offset(%rsp)
+	movq EFLAGS+\offset(%rsp),\tmp
+	movq \tmp,R11+\offset(%rsp)
 	.endm

 	.macro FAKE_STACK_FRAME child_rip
@@ -195,7 +234,7 @@ ENTRY(native_usergs_sysret64)
 	pushq %rax /* rsp */
 	CFI_ADJUST_CFA_OFFSET	8
 	CFI_REL_OFFSET	rsp,0
-	pushq $(1<<9) /* eflags - interrupts on */
+	pushq $X86_EFLAGS_IF /* eflags - interrupts on */
 	CFI_ADJUST_CFA_OFFSET	8
 	/*CFI_REL_OFFSET	rflags,0*/
 	pushq $__KERNEL_CS /* cs */
@@ -213,62 +252,184 @@ ENTRY(native_usergs_sysret64)
 	CFI_ADJUST_CFA_OFFSET	-(6*8)
 	.endm

-	.macro	CFI_DEFAULT_STACK start=1
+/*
+ * initial frame state for interrupts (and exceptions without error code)
+ */
+	.macro EMPTY_FRAME start=1 offset=0
 	.if \start
 	CFI_STARTPROC simple
 	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA	rsp,SS+8
+	CFI_DEF_CFA rsp,8+\offset
 	.else
-	CFI_DEF_CFA_OFFSET SS+8
+	CFI_DEF_CFA_OFFSET 8+\offset
 	.endif
-	CFI_REL_OFFSET	r15,R15
-	CFI_REL_OFFSET	r14,R14
-	CFI_REL_OFFSET	r13,R13
-	CFI_REL_OFFSET	r12,R12
-	CFI_REL_OFFSET	rbp,RBP
-	CFI_REL_OFFSET	rbx,RBX
-	CFI_REL_OFFSET	r11,R11
-	CFI_REL_OFFSET	r10,R10
-	CFI_REL_OFFSET	r9,R9
-	CFI_REL_OFFSET	r8,R8
-	CFI_REL_OFFSET	rax,RAX
-	CFI_REL_OFFSET	rcx,RCX
-	CFI_REL_OFFSET	rdx,RDX
-	CFI_REL_OFFSET	rsi,RSI
-	CFI_REL_OFFSET	rdi,RDI
-	CFI_REL_OFFSET	rip,RIP
-	/*CFI_REL_OFFSET	cs,CS*/
-	/*CFI_REL_OFFSET	rflags,EFLAGS*/
-	CFI_REL_OFFSET	rsp,RSP
-	/*CFI_REL_OFFSET	ss,SS*/
 	.endm
+
+/*
+ * initial frame state for interrupts (and exceptions without error code)
+ */
+	.macro INTR_FRAME start=1 offset=0
+	EMPTY_FRAME \start, SS+8+\offset-RIP
+	/*CFI_REL_OFFSET ss, SS+\offset-RIP*/
+	CFI_REL_OFFSET rsp, RSP+\offset-RIP
+	/*CFI_REL_OFFSET rflags, EFLAGS+\offset-RIP*/
+	/*CFI_REL_OFFSET cs, CS+\offset-RIP*/
+	CFI_REL_OFFSET rip, RIP+\offset-RIP
+	.endm
+
+/*
+ * initial frame state for exceptions with error code (and interrupts
+ * with vector already pushed)
+ */
+	.macro XCPT_FRAME start=1 offset=0
+	INTR_FRAME \start, RIP+\offset-ORIG_RAX
+	/*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
+	.endm
+
 /*
- * A newly forked process directly context switches into this.
+ * frame that enables calling into C.
  */
-/* rdi:	prev */
+	.macro PARTIAL_FRAME start=1 offset=0
+	XCPT_FRAME \start, ORIG_RAX+\offset-ARGOFFSET
+	CFI_REL_OFFSET rdi, RDI+\offset-ARGOFFSET
+	CFI_REL_OFFSET rsi, RSI+\offset-ARGOFFSET
+	CFI_REL_OFFSET rdx, RDX+\offset-ARGOFFSET
+	CFI_REL_OFFSET rcx, RCX+\offset-ARGOFFSET
+	CFI_REL_OFFSET rax, RAX+\offset-ARGOFFSET
+	CFI_REL_OFFSET r8, R8+\offset-ARGOFFSET
+	CFI_REL_OFFSET r9, R9+\offset-ARGOFFSET
+	CFI_REL_OFFSET r10, R10+\offset-ARGOFFSET
+	CFI_REL_OFFSET r11, R11+\offset-ARGOFFSET
+	.endm
+
+/*
+ * frame that enables passing a complete pt_regs to a C function.
+ */
+	.macro DEFAULT_FRAME start=1 offset=0
+	PARTIAL_FRAME \start, R11+\offset-R15
+	CFI_REL_OFFSET rbx, RBX+\offset
+	CFI_REL_OFFSET rbp, RBP+\offset
+	CFI_REL_OFFSET r12, R12+\offset
+	CFI_REL_OFFSET r13, R13+\offset
+	CFI_REL_OFFSET r14, R14+\offset
+	CFI_REL_OFFSET r15, R15+\offset
+	.endm
+
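The nesting of these new macros mirrors the regions of the x86-64 pt_regs layout: each macro annotates one more band of saved registers on top of what the previous one covers. A loose C sketch of that layout (field order as in pt_regs; the banding comments are illustrative, not kernel code):

struct pt_regs_sketch {
	/* DEFAULT_FRAME additionally annotates the callee-saved band: */
	unsigned long r15, r14, r13, r12, rbp, rbx;
	/* PARTIAL_FRAME annotates the caller-saved argument band: */
	unsigned long r11, r10, r9, r8, rax, rcx, rdx, rsi, rdi;
	/* XCPT_FRAME starts here: the error code (or vector) slot */
	unsigned long orig_rax;
	/* INTR_FRAME covers the hardware-pushed interrupt frame: */
	unsigned long rip, cs, eflags, rsp, ss;
};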
+/* save partial stack frame */
+ENTRY(save_args)
+	XCPT_FRAME
+	cld
+	movq_cfi rdi, RDI+16-ARGOFFSET
+	movq_cfi rsi, RSI+16-ARGOFFSET
+	movq_cfi rdx, RDX+16-ARGOFFSET
+	movq_cfi rcx, RCX+16-ARGOFFSET
+	movq_cfi rax, RAX+16-ARGOFFSET
+	movq_cfi r8, R8+16-ARGOFFSET
+	movq_cfi r9, R9+16-ARGOFFSET
+	movq_cfi r10, R10+16-ARGOFFSET
+	movq_cfi r11, R11+16-ARGOFFSET
+
+	leaq -ARGOFFSET+16(%rsp),%rdi	/* arg1 for handler */
+	movq_cfi rbp, 8		/* push %rbp */
+	leaq 8(%rsp), %rbp		/* mov %rsp, %ebp */
+	testl $3, CS(%rdi)
+	je 1f
+	SWAPGS
+	/*
+	 * irqcount is used to check if a CPU is already on an interrupt stack
+	 * or not. While this is essentially redundant with preempt_count it is
+	 * a little cheaper to use a separate counter in the PDA (short of
+	 * moving irq_enter into assembly, which would be too much work)
+	 */
+1:	incl %gs:pda_irqcount
+	jne 2f
+	popq_cfi %rax			/* move return address... */
+	mov %gs:pda_irqstackptr,%rsp
+	EMPTY_FRAME 0
+	pushq_cfi %rax			/* ... to the new stack */
+	/*
+	 * We entered an interrupt context - irqs are off:
+	 */
+2:	TRACE_IRQS_OFF
+	ret
+	CFI_ENDPROC
+END(save_args)
+
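The incl %gs:pda_irqcount / jne 2f pair in save_args implements the interrupt-stack switch. The PDA's irqcount is assumed here to idle at -1 (the conventional base value), so the increment yields zero only on the outermost interrupt, which is the one case that must move to the irq stack. A runnable C sketch of just that test:

#include <stdio.h>

/* Sketch of the save_args stack-switch decision; the -1 idle value of
 * pda_irqcount is an assumption of this illustration. */
static int irqcount = -1;

static int enter_interrupt(void)
{
	/* incl %gs:pda_irqcount; jne 2f -- switch stacks only when the
	 * incremented value is zero, i.e. this is the outermost level. */
	return ++irqcount == 0;
}

int main(void)
{
	printf("outermost: switch stack? %d\n", enter_interrupt()); /* 1 */
	printf("nested:    switch stack? %d\n", enter_interrupt()); /* 0 */
	return 0;
}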
+ENTRY(save_rest)
+	PARTIAL_FRAME 1 REST_SKIP+8
+	movq 5*8+16(%rsp), %r11	/* save return address */
+	movq_cfi rbx, RBX+16
+	movq_cfi rbp, RBP+16
+	movq_cfi r12, R12+16
+	movq_cfi r13, R13+16
+	movq_cfi r14, R14+16
+	movq_cfi r15, R15+16
+	movq %r11, 8(%rsp)	/* return address */
+	FIXUP_TOP_OF_STACK %r11, 16
+	ret
+	CFI_ENDPROC
+END(save_rest)
+
+/* save complete stack frame */
+ENTRY(save_paranoid)
+	XCPT_FRAME 1 RDI+8
+	cld
+	movq_cfi rdi, RDI+8
+	movq_cfi rsi, RSI+8
+	movq_cfi rdx, RDX+8
+	movq_cfi rcx, RCX+8
+	movq_cfi rax, RAX+8
+	movq_cfi r8, R8+8
+	movq_cfi r9, R9+8
+	movq_cfi r10, R10+8
+	movq_cfi r11, R11+8
+	movq_cfi rbx, RBX+8
+	movq_cfi rbp, RBP+8
+	movq_cfi r12, R12+8
+	movq_cfi r13, R13+8
+	movq_cfi r14, R14+8
+	movq_cfi r15, R15+8
+	movl $1,%ebx
+	movl $MSR_GS_BASE,%ecx
+	rdmsr
+	testl %edx,%edx
+	js 1f	/* negative -> in kernel */
+	SWAPGS
+	xorl %ebx,%ebx
+1:	ret
+	CFI_ENDPROC
+END(save_paranoid)
+
+/*
+ * A newly forked process directly context switches into this address.
+ *
+ * rdi: prev task we switched from
+ */
 ENTRY(ret_from_fork)
-	CFI_DEFAULT_STACK
+	DEFAULT_FRAME
+
 	push kernel_eflags(%rip)
 	CFI_ADJUST_CFA_OFFSET 8
 	popf				# reset kernel eflags
 	CFI_ADJUST_CFA_OFFSET -8
-	call schedule_tail
+
+	call schedule_tail			# rdi: 'prev' task parameter
+
 	GET_THREAD_INFO(%rcx)
-	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
-	jnz rff_trace
-rff_action:
+
+	CFI_REMEMBER_STATE
 	RESTORE_REST
-	testl $3,CS-ARGOFFSET(%rsp)	# from kernel_thread?
+
+	testl $3, CS-ARGOFFSET(%rsp)		# from kernel_thread?
 	je int_ret_from_sys_call
-	testl $_TIF_IA32,TI_flags(%rcx)
+
+	testl $_TIF_IA32, TI_flags(%rcx)	# 32-bit compat task needs IRET
 	jnz int_ret_from_sys_call
-	RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
-	jmp ret_from_sys_call
-rff_trace:
-	movq %rsp,%rdi
-	call syscall_trace_leave
-	GET_THREAD_INFO(%rcx)
-	jmp rff_action
+
+	RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
+	jmp ret_from_sys_call		# go to the SYSRET fastpath
+
+	CFI_RESTORE_STATE
 	CFI_ENDPROC
 END(ret_from_fork)

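ret_from_fork's testl $3, CS-ARGOFFSET(%rsp) works because the low two bits of a code-segment selector encode the requested privilege level, so a zero RPL identifies a frame built by kernel_thread. A C check (sketch; the selector values shown are the conventional x86-64 ones and are assumed here):

#include <assert.h>

int main(void)
{
	unsigned kernel_cs = 0x10;	/* __KERNEL_CS, assumed value */
	unsigned user_cs   = 0x33;	/* __USER_CS, assumed value */
	assert((kernel_cs & 3) == 0);	/* kernel frame: test gives zero */
	assert((user_cs & 3) == 3);	/* user frame: test is non-zero */
	return 0;
}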
@@ -278,20 +439,20 @@ END(ret_from_fork)
  * SYSCALL does not save anything on the stack and does not change the
  * stack pointer.
  */

 /*
  * Register setup:
  * rax  system call number
  * rdi  arg0
  * rcx  return address for syscall/sysret, C arg3
  * rsi  arg1
  * rdx  arg2
  * r10  arg3	(--> moved to rcx for C)
  * r8   arg4
  * r9   arg5
  * r11  eflags for syscall/sysret, temporary for C
  * r12-r15,rbp,rbx saved by C code, not touched.
  *
  * Interrupts are off on entry.
  * Only called from user space.
  *
@@ -301,7 +462,7 @@ END(ret_from_fork)
  * When user can change the frames always force IRET. That is because
  * it deals with uncanonical addresses better. SYSRET has trouble
  * with them due to bugs in both AMD and Intel CPUs.
  */

 ENTRY(system_call)
 	CFI_STARTPROC	simple
@@ -317,7 +478,7 @@ ENTRY(system_call)
  */
 ENTRY(system_call_after_swapgs)

 	movq %rsp,%gs:pda_oldrsp
 	movq %gs:pda_kernelstack,%rsp
 	/*
 	 * No need to follow this irqs off/on section - it's straight
@@ -325,7 +486,7 @@ ENTRY(system_call_after_swapgs)
 	 */
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	SAVE_ARGS 8,1
 	movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
 	movq %rcx,RIP-ARGOFFSET(%rsp)
 	CFI_REL_OFFSET rip,RIP-ARGOFFSET
 	GET_THREAD_INFO(%rcx)
@@ -339,19 +500,19 @@ system_call_fastpath:
 	movq %rax,RAX-ARGOFFSET(%rsp)
 /*
  * Syscall return path ending with SYSRET (fast path)
  * Has incomplete stack frame and undefined top of stack.
  */
 ret_from_sys_call:
 	movl $_TIF_ALLWORK_MASK,%edi
 	/* edi:	flagmask */
 sysret_check:
 	LOCKDEP_SYS_EXIT
 	GET_THREAD_INFO(%rcx)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	movl TI_flags(%rcx),%edx
 	andl %edi,%edx
 	jnz sysret_careful
 	CFI_REMEMBER_STATE
 	/*
 	 * sysretq will re-enable interrupts:
@@ -366,7 +527,7 @@ sysret_check:

 	CFI_RESTORE_STATE
 	/* Handle reschedules */
 	/* edx:	work, edi: workmask */
 sysret_careful:
 	bt $TIF_NEED_RESCHED,%edx
 	jnc sysret_signal
@@ -379,7 +540,7 @@ sysret_careful:
 	CFI_ADJUST_CFA_OFFSET -8
 	jmp sysret_check

 	/* Handle a signal */
 sysret_signal:
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
@@ -388,17 +549,20 @@ sysret_signal:
 	jc sysret_audit
 #endif
 	/* edx:	work flags (arg3) */
-	leaq do_notify_resume(%rip),%rax
 	leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
 	xorl %esi,%esi # oldset -> arg2
-	call ptregscall_common
+	SAVE_REST
+	FIXUP_TOP_OF_STACK %r11
+	call do_notify_resume
+	RESTORE_TOP_OF_STACK %r11
+	RESTORE_REST
 	movl $_TIF_WORK_MASK,%edi
 	/* Use IRET because user could have changed frame. This
 	   works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	jmp int_with_check

 badsys:
 	movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
 	jmp ret_from_sys_call
@@ -437,7 +601,7 @@ sysret_audit:
 #endif	/* CONFIG_AUDITSYSCALL */

 	/* Do syscall tracing */
 tracesys:
 #ifdef CONFIG_AUDITSYSCALL
 	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
 	jz auditsys
@@ -460,8 +624,8 @@ tracesys:
 	call *sys_call_table(,%rax,8)
 	movq %rax,RAX-ARGOFFSET(%rsp)
 	/* Use IRET because user could have changed frame */

 /*
  * Syscall return path ending with IRET.
  * Has correct top of stack, but partial stack frame.
  */
@@ -505,18 +669,18 @@ int_very_careful:
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	SAVE_REST
 	/* Check for syscall exit trace */
 	testl $_TIF_WORK_SYSCALL_EXIT,%edx
 	jz int_signal
 	pushq %rdi
 	CFI_ADJUST_CFA_OFFSET 8
 	leaq 8(%rsp),%rdi # &ptregs -> arg1
 	call syscall_trace_leave
 	popq %rdi
 	CFI_ADJUST_CFA_OFFSET -8
 	andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
 	jmp int_restore_rest

 int_signal:
 	testl $_TIF_DO_NOTIFY_MASK,%edx
 	jz 1f
@@ -531,22 +695,24 @@ int_restore_rest:
 	jmp int_with_check
 	CFI_ENDPROC
 END(system_call)

 /*
  * Certain special system calls that need to save a complete full stack frame.
  */
-
 	.macro PTREGSCALL label,func,arg
-	.globl \label
-\label:
-	leaq \func(%rip),%rax
-	leaq -ARGOFFSET+8(%rsp),\arg	/* 8 for return address */
-	jmp ptregscall_common
+ENTRY(\label)
+	PARTIAL_FRAME 1 8		/* offset 8: return address */
+	subq $REST_SKIP, %rsp
+	CFI_ADJUST_CFA_OFFSET REST_SKIP
+	call save_rest
+	DEFAULT_FRAME 0 8		/* offset 8: return address */
+	leaq 8(%rsp), \arg	/* pt_regs pointer */
+	call \func
+	jmp ptregscall_common
+	CFI_ENDPROC
 END(\label)
 	.endm

-	CFI_STARTPROC
-
 	PTREGSCALL stub_clone, sys_clone, %r8
 	PTREGSCALL stub_fork, sys_fork, %rdi
 	PTREGSCALL stub_vfork, sys_vfork, %rdi
@@ -554,25 +720,18 @@ END(\label)
 	PTREGSCALL stub_iopl, sys_iopl, %rsi

 ENTRY(ptregscall_common)
-	popq %r11
-	CFI_ADJUST_CFA_OFFSET -8
-	CFI_REGISTER rip, r11
-	SAVE_REST
-	movq %r11, %r15
-	CFI_REGISTER rip, r15
-	FIXUP_TOP_OF_STACK %r11
-	call *%rax
-	RESTORE_TOP_OF_STACK %r11
-	movq %r15, %r11
-	CFI_REGISTER rip, r11
-	RESTORE_REST
-	pushq %r11
-	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET rip, 0
-	ret
+	DEFAULT_FRAME 1 8	/* offset 8: return address */
+	RESTORE_TOP_OF_STACK %r11, 8
+	movq_cfi_restore R15+8, r15
+	movq_cfi_restore R14+8, r14
+	movq_cfi_restore R13+8, r13
+	movq_cfi_restore R12+8, r12
+	movq_cfi_restore RBP+8, rbp
+	movq_cfi_restore RBX+8, rbx
+	ret $REST_SKIP		/* pop extended registers */
 	CFI_ENDPROC
 END(ptregscall_common)

 ENTRY(stub_execve)
 	CFI_STARTPROC
 	popq %r11
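The offsets in save_rest and the closing ret $REST_SKIP of ptregscall_common are two views of the same layout: six callee-saved slots (REST_SKIP = 6*8) sit between save_rest's own return address and the stub's, and a "ret imm16" pops the return address and then discards those slots in one instruction. A quick C check (sketch; constants read off the assembly):

#include <assert.h>

int main(void)
{
	int rest_skip = 6 * 8;		/* rbx, rbp, r12-r15 slots */
	int ret_to_save_rest = 8;	/* pushed by "call save_rest" */
	/* "movq 5*8+16(%rsp), %r11" reaches over both areas to the
	 * stub's caller return address: */
	assert(ret_to_save_rest + rest_skip == 5*8 + 16);
	return 0;
}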
@@ -588,11 +747,11 @@ ENTRY(stub_execve)
 	jmp int_ret_from_sys_call
 	CFI_ENDPROC
 END(stub_execve)

 /*
  * sigreturn is special because it needs to restore all registers on return.
  * This cannot be done with SYSRET, so use the IRET return path instead.
  */
 ENTRY(stub_rt_sigreturn)
 	CFI_STARTPROC
 	addq $8, %rsp
@@ -608,70 +767,70 @@ ENTRY(stub_rt_sigreturn)
 END(stub_rt_sigreturn)

 /*
- * initial frame state for interrupts and exceptions
+ * Build the entry stubs and pointer table with some assembler magic.
+ * We pack 7 stubs into a single 32-byte chunk, which will fit in a
+ * single cache line on all modern x86 implementations.
  */
-	.macro _frame ref
-	CFI_STARTPROC simple
-	CFI_SIGNAL_FRAME
-	CFI_DEF_CFA rsp,SS+8-\ref
-	/*CFI_REL_OFFSET ss,SS-\ref*/
-	CFI_REL_OFFSET rsp,RSP-\ref
-	/*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
-	/*CFI_REL_OFFSET cs,CS-\ref*/
-	CFI_REL_OFFSET rip,RIP-\ref
-	.endm
+	.section .init.rodata,"a"
+ENTRY(interrupt)
+	.text
+	.p2align 5
+	.p2align CONFIG_X86_L1_CACHE_SHIFT
+ENTRY(irq_entries_start)
+	INTR_FRAME
+vector=FIRST_EXTERNAL_VECTOR
+.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
+	.balign 32
+	.rept 7
+	.if vector < NR_VECTORS
+	.if vector <> FIRST_EXTERNAL_VECTOR
+	CFI_ADJUST_CFA_OFFSET -8
+	.endif
+1:	pushq $(~vector+0x80)	/* Note: always in signed byte range */
+	CFI_ADJUST_CFA_OFFSET 8
+	.if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
+	jmp 2f
+	.endif
+	.previous
+	.quad 1b
+	.text
+vector=vector+1
+	.endif
+	.endr
+2:	jmp common_interrupt
+.endr
+	CFI_ENDPROC
+END(irq_entries_start)

-/* initial frame state for interrupts (and exceptions without error code) */
-#define INTR_FRAME _frame RIP
-/* initial frame state for exceptions with error code (and interrupts with
-   vector already pushed) */
-#define XCPT_FRAME _frame ORIG_RAX
+.previous
+END(interrupt)
+.previous

 /*
  * Interrupt entry/exit.
  *
  * Interrupt entry points save only callee clobbered registers in fast path.
  *
  * Entry runs with interrupts off.
  */

-/* 0(%rsp): interrupt number */
+/* 0(%rsp): ~(interrupt number) */
 	.macro interrupt func
-	cld
-	SAVE_ARGS
-	leaq -ARGOFFSET(%rsp),%rdi	# arg1 for handler
-	pushq %rbp
-	/*
-	 * Save rbp twice: One is for marking the stack frame, as usual, and the
-	 * other, to fill pt_regs properly. This is because bx comes right
-	 * before the last saved register in that structure, and not bp. If the
-	 * base pointer were in the place bx is today, this would not be needed.
-	 */
-	movq %rbp, -8(%rsp)
-	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET rbp, 0
-	movq %rsp,%rbp
-	CFI_DEF_CFA_REGISTER rbp
-	testl $3,CS(%rdi)
-	je 1f
-	SWAPGS
-	/* irqcount is used to check if a CPU is already on an interrupt
-	   stack or not. While this is essentially redundant with preempt_count
-	   it is a little cheaper to use a separate counter in the PDA
-	   (short of moving irq_enter into assembly, which would be too
-	   much work) */
-1:	incl %gs:pda_irqcount
-	cmoveq %gs:pda_irqstackptr,%rsp
-	push %rbp			# backlink for old unwinder
-	/*
-	 * We entered an interrupt context - irqs are off:
-	 */
-	TRACE_IRQS_OFF
+	subq $10*8, %rsp
+	CFI_ADJUST_CFA_OFFSET 10*8
+	call save_args
+	PARTIAL_FRAME 0
 	call \func
 	.endm

-ENTRY(common_interrupt)
+	/*
+	 * The interrupt stubs push (~vector+0x80) onto the stack and
+	 * then jump to common_interrupt.
+	 */
+	.p2align CONFIG_X86_L1_CACHE_SHIFT
+common_interrupt:
 	XCPT_FRAME
+	addq $-0x80,(%rsp)		/* Adjust vector to [-256,-1] range */
 	interrupt do_IRQ
 	/* 0(%rsp): oldrsp-ARGOFFSET */
 ret_from_intr:
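The combination of the stub's pushq $(~vector+0x80) and common_interrupt's addq $-0x80 is worth unpacking: biasing by 0x80 keeps the pushed immediate in signed-byte range (a two-byte pushq, which is what lets seven stubs fit in 32 bytes), and the later subtraction restores ~vector so the handler can recover the vector with a complement. A C round-trip check (sketch; external vectors are assumed to start at 0x20):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	for (int vector = 0x20; vector < 256; vector++) {
		int64_t pushed = ~vector + 0x80;	/* what the stub pushes */
		assert(pushed >= -128 && pushed <= 127);/* signed byte range */
		int64_t stored = pushed + -0x80;	/* addq $-0x80,(%rsp) */
		assert(stored == (int64_t)~vector);	/* handler sees ~vector */
		assert((int)~stored == vector);		/* and recovers vector */
	}
	return 0;
}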
@@ -685,12 +844,12 @@ exit_intr:
 	GET_THREAD_INFO(%rcx)
 	testl $3,CS-ARGOFFSET(%rsp)
 	je retint_kernel

 	/* Interrupt came from user space */
 	/*
 	 * Has a correct top of stack, but a partial stack frame
 	 * %rcx: thread info. Interrupts off.
 	 */
 retint_with_reschedule:
 	movl $_TIF_WORK_MASK,%edi
 retint_check:
@@ -763,20 +922,20 @@ retint_careful:
 	pushq %rdi
 	CFI_ADJUST_CFA_OFFSET 8
 	call schedule
 	popq %rdi
 	CFI_ADJUST_CFA_OFFSET -8
 	GET_THREAD_INFO(%rcx)
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	jmp retint_check

 retint_signal:
 	testl $_TIF_DO_NOTIFY_MASK,%edx
 	jz retint_swapgs
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	SAVE_REST
 	movq $-1,ORIG_RAX(%rsp)
 	xorl %esi,%esi # oldset
 	movq %rsp,%rdi # &pt_regs
 	call do_notify_resume
@@ -798,324 +957,211 @@ ENTRY(retint_kernel)
 	jnc retint_restore_args
 	call preempt_schedule_irq
 	jmp exit_intr
 #endif

 	CFI_ENDPROC
 END(common_interrupt)

 /*
  * APIC interrupts.
  */
-	.macro apicinterrupt num,func
+.macro apicinterrupt num sym do_sym
+ENTRY(\sym)
 	INTR_FRAME
 	pushq $~(\num)
 	CFI_ADJUST_CFA_OFFSET 8
-	interrupt \func
+	interrupt \do_sym
 	jmp ret_from_intr
 	CFI_ENDPROC
-	.endm
-
+END(\sym)
+.endm
-ENTRY(thermal_interrupt)
-	apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
-END(thermal_interrupt)
-
-ENTRY(threshold_interrupt)
-	apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
-END(threshold_interrupt)
-
-#ifdef CONFIG_SMP
-ENTRY(reschedule_interrupt)
-	apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
-END(reschedule_interrupt)
-
-	.macro INVALIDATE_ENTRY num
-ENTRY(invalidate_interrupt\num)
-	apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
-END(invalidate_interrupt\num)
-	.endm

-	INVALIDATE_ENTRY 0
-	INVALIDATE_ENTRY 1
-	INVALIDATE_ENTRY 2
-	INVALIDATE_ENTRY 3
-	INVALIDATE_ENTRY 4
-	INVALIDATE_ENTRY 5
-	INVALIDATE_ENTRY 6
-	INVALIDATE_ENTRY 7
-
-ENTRY(call_function_interrupt)
-	apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
-END(call_function_interrupt)
-ENTRY(call_function_single_interrupt)
-	apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt
-END(call_function_single_interrupt)
-ENTRY(irq_move_cleanup_interrupt)
-	apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt
-END(irq_move_cleanup_interrupt)
+#ifdef CONFIG_SMP
+apicinterrupt IRQ_MOVE_CLEANUP_VECTOR \
+	irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
 #endif

-ENTRY(apic_timer_interrupt)
-	apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
-END(apic_timer_interrupt)
+apicinterrupt UV_BAU_MESSAGE \
+	uv_bau_message_intr1 uv_bau_message_interrupt
+apicinterrupt LOCAL_TIMER_VECTOR \
+	apic_timer_interrupt smp_apic_timer_interrupt
+
+#ifdef CONFIG_SMP
+apicinterrupt INVALIDATE_TLB_VECTOR_START+0 \
+	invalidate_interrupt0 smp_invalidate_interrupt
+apicinterrupt INVALIDATE_TLB_VECTOR_START+1 \
+	invalidate_interrupt1 smp_invalidate_interrupt
+apicinterrupt INVALIDATE_TLB_VECTOR_START+2 \
+	invalidate_interrupt2 smp_invalidate_interrupt
+apicinterrupt INVALIDATE_TLB_VECTOR_START+3 \
+	invalidate_interrupt3 smp_invalidate_interrupt
+apicinterrupt INVALIDATE_TLB_VECTOR_START+4 \
+	invalidate_interrupt4 smp_invalidate_interrupt
+apicinterrupt INVALIDATE_TLB_VECTOR_START+5 \
+	invalidate_interrupt5 smp_invalidate_interrupt
+apicinterrupt INVALIDATE_TLB_VECTOR_START+6 \
+	invalidate_interrupt6 smp_invalidate_interrupt
+apicinterrupt INVALIDATE_TLB_VECTOR_START+7 \
+	invalidate_interrupt7 smp_invalidate_interrupt
+#endif

-ENTRY(uv_bau_message_intr1)
-	apicinterrupt 220,uv_bau_message_interrupt
-END(uv_bau_message_intr1)
+apicinterrupt THRESHOLD_APIC_VECTOR \
+	threshold_interrupt mce_threshold_interrupt
+apicinterrupt THERMAL_APIC_VECTOR \
+	thermal_interrupt smp_thermal_interrupt
+
+#ifdef CONFIG_SMP
+apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
+	call_function_single_interrupt smp_call_function_single_interrupt
+apicinterrupt CALL_FUNCTION_VECTOR \
+	call_function_interrupt smp_call_function_interrupt
+apicinterrupt RESCHEDULE_VECTOR \
+	reschedule_interrupt smp_reschedule_interrupt
+#endif

-ENTRY(error_interrupt)
-	apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
-END(error_interrupt)
+apicinterrupt ERROR_APIC_VECTOR \
+	error_interrupt smp_error_interrupt
+apicinterrupt SPURIOUS_APIC_VECTOR \
+	spurious_interrupt smp_spurious_interrupt

-ENTRY(spurious_interrupt)
-	apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
-END(spurious_interrupt)
-
 /*
  * Exception entry points.
  */
-	.macro zeroentry sym
+.macro zeroentry sym do_sym
+ENTRY(\sym)
 	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	pushq $0	/* push error code/oldrax */
-	CFI_ADJUST_CFA_OFFSET 8
-	pushq %rax	/* push real oldrax to the rdi slot */
-	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET rax,0
-	leaq \sym(%rip),%rax
-	jmp error_entry
+	pushq_cfi $-1		/* ORIG_RAX: no syscall to restart */
+	subq $15*8,%rsp
+	CFI_ADJUST_CFA_OFFSET 15*8
+	call error_entry
+	DEFAULT_FRAME 0
+	movq %rsp,%rdi		/* pt_regs pointer */
+	xorl %esi,%esi		/* no error code */
+	call \do_sym
+	jmp error_exit		/* %ebx: no swapgs flag */
 	CFI_ENDPROC
-	.endm
+END(\sym)
+.endm

-	.macro errorentry sym
-	XCPT_FRAME
+.macro paranoidzeroentry sym do_sym
+ENTRY(\sym)
+	INTR_FRAME
 	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	pushq %rax
+	pushq $-1		/* ORIG_RAX: no syscall to restart */
 	CFI_ADJUST_CFA_OFFSET 8
-	CFI_REL_OFFSET rax,0
-	leaq \sym(%rip),%rax
-	jmp error_entry
+	subq $15*8, %rsp
+	call save_paranoid
+	TRACE_IRQS_OFF
+	movq %rsp,%rdi		/* pt_regs pointer */
+	xorl %esi,%esi		/* no error code */
+	call \do_sym
+	jmp paranoid_exit	/* %ebx: no swapgs flag */
 	CFI_ENDPROC
-	.endm
+END(\sym)
+.endm

-	/* error code is on the stack already */
-	/* handle NMI like exceptions that can happen everywhere */
-	.macro paranoidentry sym, ist=0, irqtrace=1
-	SAVE_ALL
-	cld
-	movl $1,%ebx
-	movl $MSR_GS_BASE,%ecx
-	rdmsr
-	testl %edx,%edx
-	js 1f
-	SWAPGS
-	xorl %ebx,%ebx
-1:
-	.if \ist
-	movq %gs:pda_data_offset, %rbp
-	.endif
-	.if \irqtrace
-	TRACE_IRQS_OFF
-	.endif
-	movq %rsp,%rdi
-	movq ORIG_RAX(%rsp),%rsi
-	movq $-1,ORIG_RAX(%rsp)
-	.if \ist
-	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
-	.endif
-	call \sym
-	.if \ist
-	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
-	.endif
-	DISABLE_INTERRUPTS(CLBR_NONE)
-	.if \irqtrace
+.macro paranoidzeroentry_ist sym do_sym ist
+ENTRY(\sym)
+	INTR_FRAME
+	PARAVIRT_ADJUST_EXCEPTION_FRAME
+	pushq $-1		/* ORIG_RAX: no syscall to restart */
+	CFI_ADJUST_CFA_OFFSET 8
+	subq $15*8, %rsp
+	call save_paranoid
 	TRACE_IRQS_OFF
-	.endif
-	.endm
+	movq %rsp,%rdi		/* pt_regs pointer */
+	xorl %esi,%esi		/* no error code */
+	movq %gs:pda_data_offset, %rbp
+	subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+	call \do_sym
+	addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
+	jmp paranoid_exit	/* %ebx: no swapgs flag */
+	CFI_ENDPROC
+END(\sym)
+.endm

-	/*
-	 * "Paranoid" exit path from exception stack.
-	 * Paranoid because this is used by NMIs and cannot take
-	 * any kernel state for granted.
-	 * We don't do kernel preemption checks here, because only
-	 * NMI should be common and it does not enable IRQs and
-	 * cannot get reschedule ticks.
-	 *
-	 * "trace" is 0 for the NMI handler only, because irq-tracing
-	 * is fundamentally NMI-unsafe. (we cannot change the soft and
-	 * hard flags at once, atomically)
-	 */
-	.macro paranoidexit trace=1
-	/* ebx:	no swapgs flag */
-paranoid_exit\trace:
-	testl %ebx,%ebx			/* swapgs needed? */
-	jnz paranoid_restore\trace
-	testl $3,CS(%rsp)
-	jnz paranoid_userspace\trace
-paranoid_swapgs\trace:
-	.if \trace
-	TRACE_IRQS_IRETQ 0
-	.endif
-	SWAPGS_UNSAFE_STACK
-paranoid_restore\trace:
-	RESTORE_ALL 8
-	jmp irq_return
-paranoid_userspace\trace:
-	GET_THREAD_INFO(%rcx)
-	movl TI_flags(%rcx),%ebx
-	andl $_TIF_WORK_MASK,%ebx
-	jz paranoid_swapgs\trace
-	movq %rsp,%rdi			/* &pt_regs */
-	call sync_regs
-	movq %rax,%rsp			/* switch stack for scheduling */
-	testl $_TIF_NEED_RESCHED,%ebx
-	jnz paranoid_schedule\trace
-	movl %ebx,%edx			/* arg3: thread flags */
-	.if \trace
-	TRACE_IRQS_ON
-	.endif
-	ENABLE_INTERRUPTS(CLBR_NONE)
-	xorl %esi,%esi			/* arg2: oldset */
-	movq %rsp,%rdi			/* arg1: &pt_regs */
-	call do_notify_resume
-	DISABLE_INTERRUPTS(CLBR_NONE)
-	.if \trace
-	TRACE_IRQS_OFF
-	.endif
-	jmp paranoid_userspace\trace
-paranoid_schedule\trace:
-	.if \trace
-	TRACE_IRQS_ON
-	.endif
-	ENABLE_INTERRUPTS(CLBR_ANY)
-	call schedule
-	DISABLE_INTERRUPTS(CLBR_ANY)
-	.if \trace
-	TRACE_IRQS_OFF
-	.endif
-	jmp paranoid_userspace\trace
+.macro errorentry sym do_sym
+ENTRY(\sym)
+	XCPT_FRAME
+	PARAVIRT_ADJUST_EXCEPTION_FRAME
+	subq $15*8,%rsp
+	CFI_ADJUST_CFA_OFFSET 15*8
+	call error_entry
+	DEFAULT_FRAME 0
+	movq %rsp,%rdi			/* pt_regs pointer */
+	movq ORIG_RAX(%rsp),%rsi	/* get error code */
+	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
+	call \do_sym
+	jmp error_exit			/* %ebx: no swapgs flag */
 	CFI_ENDPROC
-	.endm
+END(\sym)
+.endm

-/*
- * Exception entry point. This expects an error code/orig_rax on the stack
- * and the exception handler in %rax.
- */
-KPROBE_ENTRY(error_entry)
-	_frame RDI
-	CFI_REL_OFFSET rax,0
-	/* rdi slot contains rax, oldrax contains error code */
-	cld
-	subq $14*8,%rsp
-	CFI_ADJUST_CFA_OFFSET	(14*8)
-	movq %rsi,13*8(%rsp)
-	CFI_REL_OFFSET	rsi,RSI
-	movq 14*8(%rsp),%rsi	/* load rax from rdi slot */
-	CFI_REGISTER	rax,rsi
-	movq %rdx,12*8(%rsp)
-	CFI_REL_OFFSET	rdx,RDX
-	movq %rcx,11*8(%rsp)
-	CFI_REL_OFFSET	rcx,RCX
-	movq %rsi,10*8(%rsp)	/* store rax */
-	CFI_REL_OFFSET	rax,RAX
-	movq %r8, 9*8(%rsp)
-	CFI_REL_OFFSET	r8,R8
-	movq %r9, 8*8(%rsp)
-	CFI_REL_OFFSET	r9,R9
-	movq %r10,7*8(%rsp)
-	CFI_REL_OFFSET	r10,R10
-	movq %r11,6*8(%rsp)
-	CFI_REL_OFFSET	r11,R11
-	movq %rbx,5*8(%rsp)
-	CFI_REL_OFFSET	rbx,RBX
-	movq %rbp,4*8(%rsp)
-	CFI_REL_OFFSET	rbp,RBP
-	movq %r12,3*8(%rsp)
-	CFI_REL_OFFSET	r12,R12
-	movq %r13,2*8(%rsp)
-	CFI_REL_OFFSET	r13,R13
-	movq %r14,1*8(%rsp)
-	CFI_REL_OFFSET	r14,R14
-	movq %r15,(%rsp)
-	CFI_REL_OFFSET	r15,R15
-	xorl %ebx,%ebx
-	testl $3,CS(%rsp)
-	je error_kernelspace
-error_swapgs:
-	SWAPGS
-error_sti:
-	TRACE_IRQS_OFF
-	movq %rdi,RDI(%rsp)
-	CFI_REL_OFFSET	rdi,RDI
-	movq %rsp,%rdi
-	movq ORIG_RAX(%rsp),%rsi	/* get error code */
-	movq $-1,ORIG_RAX(%rsp)
-	call *%rax
-	/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
-error_exit:
-	movl %ebx,%eax
-	RESTORE_REST
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	/* error code is on the stack already */
+.macro paranoiderrorentry sym do_sym
+ENTRY(\sym)
+	XCPT_FRAME
+	PARAVIRT_ADJUST_EXCEPTION_FRAME
+	subq $15*8,%rsp
+	CFI_ADJUST_CFA_OFFSET 15*8
+	call save_paranoid
+	DEFAULT_FRAME 0
 	TRACE_IRQS_OFF
-	GET_THREAD_INFO(%rcx)
-	testl %eax,%eax
-	jne retint_kernel
-	LOCKDEP_SYS_EXIT_IRQ
-	movl TI_flags(%rcx),%edx
-	movl $_TIF_WORK_MASK,%edi
-	andl %edi,%edx
-	jnz retint_careful
-	jmp retint_swapgs
+	movq %rsp,%rdi			/* pt_regs pointer */
+	movq ORIG_RAX(%rsp),%rsi	/* get error code */
+	movq $-1,ORIG_RAX(%rsp)		/* no syscall to restart */
+	call \do_sym
+	jmp paranoid_exit		/* %ebx: no swapgs flag */
 	CFI_ENDPROC
+END(\sym)
+.endm

-error_kernelspace:
-	incl %ebx
-	/* There are two places in the kernel that can potentially fault with
-	   usergs. Handle them here. The exception handlers after
-	   iret run with kernel gs again, so don't set the user space flag.
-	   B stepping K8s sometimes report an truncated RIP for IRET
-	   exceptions returning to compat mode. Check for these here too. */
-	leaq irq_return(%rip),%rcx
-	cmpq %rcx,RIP(%rsp)
-	je error_swapgs
-	movl %ecx,%ecx	/* zero extend */
-	cmpq %rcx,RIP(%rsp)
-	je error_swapgs
-	cmpq $gs_change,RIP(%rsp)
-	je error_swapgs
-	jmp error_sti
+zeroentry divide_error do_divide_error
+zeroentry overflow do_overflow
+zeroentry bounds do_bounds
+zeroentry invalid_op do_invalid_op
+zeroentry device_not_available do_device_not_available
+paranoiderrorentry double_fault do_double_fault
+zeroentry coprocessor_segment_overrun do_coprocessor_segment_overrun
+errorentry invalid_TSS do_invalid_TSS
+errorentry segment_not_present do_segment_not_present
+zeroentry spurious_interrupt_bug do_spurious_interrupt_bug
+zeroentry coprocessor_error do_coprocessor_error
+errorentry alignment_check do_alignment_check
+zeroentry simd_coprocessor_error do_simd_coprocessor_error
+
+	/* Reload gs selector with exception handling */
+	/* edi: new selector */
-KPROBE_END(error_entry)
-
-	/* Reload gs selector with exception handling */
-	/* edi: new selector */
 ENTRY(native_load_gs_index)
 	CFI_STARTPROC
 	pushf
 	CFI_ADJUST_CFA_OFFSET 8
 	DISABLE_INTERRUPTS(CLBR_ANY | ~(CLBR_RDI))
 	SWAPGS
 gs_change:
 	movl %edi,%gs
 2:	mfence		/* workaround */
 	SWAPGS
 	popf
 	CFI_ADJUST_CFA_OFFSET -8
 	ret
 	CFI_ENDPROC
-ENDPROC(native_load_gs_index)
+END(native_load_gs_index)

 	.section __ex_table,"a"
 	.align 8
 	.quad gs_change,bad_gs
 	.previous
 	.section .fixup,"ax"
 	/* running with kernelgs */
 bad_gs:
 	SWAPGS			/* switch back to user gs */
 	xorl %eax,%eax
 	movl %eax,%gs
 	jmp 2b
 	.previous

 /*
  * Create a kernel thread.
  *
@@ -1138,7 +1184,7 @@ ENTRY(kernel_thread)

 	xorl %r8d,%r8d
 	xorl %r9d,%r9d

 	# clone now
 	call do_fork
 	movq %rax,RAX(%rsp)
@@ -1149,15 +1195,15 @@ ENTRY(kernel_thread)
  * so internally to the x86_64 port you can rely on kernel_thread()
  * not to reschedule the child before returning, this avoids the need
  * of hacks for example to fork off the per-CPU idle tasks.
  * [Hopefully no generic code relies on the reschedule -AK]
  */
 	RESTORE_ALL
 	UNFAKE_STACK_FRAME
 	ret
 	CFI_ENDPROC
-ENDPROC(kernel_thread)
+END(kernel_thread)

-child_rip:
+ENTRY(child_rip)
 	pushq $0		# fake return address
 	CFI_STARTPROC
 	/*
@@ -1170,8 +1216,9 @@ child_rip:
 	# exit
 	mov %eax, %edi
 	call do_exit
+	ud2			# padding for call trace
 	CFI_ENDPROC
-ENDPROC(child_rip)
+END(child_rip)

 /*
  * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
@@ -1191,10 +1238,10 @@ ENDPROC(child_rip)
 ENTRY(kernel_execve)
 	CFI_STARTPROC
 	FAKE_STACK_FRAME $0
 	SAVE_ALL
 	movq %rsp,%rcx
 	call sys_execve
 	movq %rax, RAX(%rsp)
 	RESTORE_REST
 	testq %rax,%rax
 	je int_ret_from_sys_call
@@ -1202,129 +1249,7 @@ ENTRY(kernel_execve)
 	UNFAKE_STACK_FRAME
 	ret
 	CFI_ENDPROC
-ENDPROC(kernel_execve)
-
-KPROBE_ENTRY(page_fault)
-	errorentry do_page_fault
-KPROBE_END(page_fault)
-
-ENTRY(coprocessor_error)
-	zeroentry do_coprocessor_error
-END(coprocessor_error)
-
-ENTRY(simd_coprocessor_error)
-	zeroentry do_simd_coprocessor_error
-END(simd_coprocessor_error)
-
-ENTRY(device_not_available)
-	zeroentry do_device_not_available
-END(device_not_available)
-
-	/* runs on exception stack */
-KPROBE_ENTRY(debug)
-	INTR_FRAME
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	pushq $0
-	CFI_ADJUST_CFA_OFFSET 8
-	paranoidentry do_debug, DEBUG_STACK
-	paranoidexit
-KPROBE_END(debug)
-
-	/* runs on exception stack */
-KPROBE_ENTRY(nmi)
-	INTR_FRAME
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	pushq $-1
-	CFI_ADJUST_CFA_OFFSET 8
-	paranoidentry do_nmi, 0, 0
-#ifdef CONFIG_TRACE_IRQFLAGS
-	paranoidexit 0
-#else
-	jmp paranoid_exit1
-	CFI_ENDPROC
-#endif
-KPROBE_END(nmi)
-
-KPROBE_ENTRY(int3)
-	INTR_FRAME
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	pushq $0
-	CFI_ADJUST_CFA_OFFSET 8
-	paranoidentry do_int3, DEBUG_STACK
-	jmp paranoid_exit1
-	CFI_ENDPROC
-KPROBE_END(int3)
-
-ENTRY(overflow)
-	zeroentry do_overflow
-END(overflow)
-
-ENTRY(bounds)
-	zeroentry do_bounds
-END(bounds)
-
-ENTRY(invalid_op)
-	zeroentry do_invalid_op
-END(invalid_op)
-
-ENTRY(coprocessor_segment_overrun)
-	zeroentry do_coprocessor_segment_overrun
-END(coprocessor_segment_overrun)
-
-	/* runs on exception stack */
-ENTRY(double_fault)
-	XCPT_FRAME
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	paranoidentry do_double_fault
-	jmp paranoid_exit1
-	CFI_ENDPROC
-END(double_fault)
-
-ENTRY(invalid_TSS)
-	errorentry do_invalid_TSS
-END(invalid_TSS)
-
-ENTRY(segment_not_present)
-	errorentry do_segment_not_present
-END(segment_not_present)
-
-	/* runs on exception stack */
-ENTRY(stack_segment)
-	XCPT_FRAME
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	paranoidentry do_stack_segment
-	jmp paranoid_exit1
-	CFI_ENDPROC
-END(stack_segment)
-
-KPROBE_ENTRY(general_protection)
-	errorentry do_general_protection
-KPROBE_END(general_protection)
-
-ENTRY(alignment_check)
-	errorentry do_alignment_check
-END(alignment_check)
-
-ENTRY(divide_error)
-	zeroentry do_divide_error
-END(divide_error)
-
-ENTRY(spurious_interrupt_bug)
-	zeroentry do_spurious_interrupt_bug
-END(spurious_interrupt_bug)
-
-#ifdef CONFIG_X86_MCE
-	/* runs on exception stack */
-ENTRY(machine_check)
-	INTR_FRAME
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-	pushq $0
-	CFI_ADJUST_CFA_OFFSET 8
-	paranoidentry do_machine_check
-	jmp paranoid_exit1
-	CFI_ENDPROC
-END(machine_check)
-#endif
+END(kernel_execve)

 /* Call softirq on interrupt stack. Interrupts are off. */
 ENTRY(call_softirq)
@@ -1344,40 +1269,33 @@ ENTRY(call_softirq)
 	decl %gs:pda_irqcount
 	ret
 	CFI_ENDPROC
-ENDPROC(call_softirq)
-
-KPROBE_ENTRY(ignore_sysret)
-	CFI_STARTPROC
-	mov $-ENOSYS,%eax
-	sysret
-	CFI_ENDPROC
-ENDPROC(ignore_sysret)
+END(call_softirq)

 #ifdef CONFIG_XEN
-ENTRY(xen_hypervisor_callback)
-	zeroentry xen_do_hypervisor_callback
-END(xen_hypervisor_callback)
+zeroentry xen_hypervisor_callback xen_do_hypervisor_callback

 /*
-# A note on the "critical region" in our callback handler.
-# We want to avoid stacking callback handlers due to events occurring
-# during handling of the last event. To do this, we keep events disabled
-# until we've done all processing. HOWEVER, we must enable events before
-# popping the stack frame (can't be done atomically) and so it would still
-# be possible to get enough handler activations to overflow the stack.
-# Although unlikely, bugs of that kind are hard to track down, so we'd
-# like to avoid the possibility.
-# So, on entry to the handler we detect whether we interrupted an
-# existing activation in its critical region -- if so, we pop the current
-# activation and restart the handler using the previous one.
-*/
+ * A note on the "critical region" in our callback handler.
+ * We want to avoid stacking callback handlers due to events occurring
+ * during handling of the last event. To do this, we keep events disabled
+ * until we've done all processing. HOWEVER, we must enable events before
+ * popping the stack frame (can't be done atomically) and so it would still
+ * be possible to get enough handler activations to overflow the stack.
+ * Although unlikely, bugs of that kind are hard to track down, so we'd
+ * like to avoid the possibility.
+ * So, on entry to the handler we detect whether we interrupted an
+ * existing activation in its critical region -- if so, we pop the current
+ * activation and restart the handler using the previous one.
+ */
 ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct *pt_regs)
 	CFI_STARTPROC
-/* Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
-   see the correct pointer to the pt_regs */
+/*
+ * Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
+ * see the correct pointer to the pt_regs
+ */
 	movq %rdi, %rsp            # we don't return, adjust the stack frame
 	CFI_ENDPROC
-	CFI_DEFAULT_STACK
+	DEFAULT_FRAME
 11:	incl %gs:pda_irqcount
 	movq %rsp,%rbp
 	CFI_DEF_CFA_REGISTER rbp
@@ -1392,23 +1310,26 @@ ENTRY(xen_do_hypervisor_callback)	# do_hypervisor_callback(struct *pt_regs)
 END(do_hypervisor_callback)

 /*
-# Hypervisor uses this for application faults while it executes.
-# We get here for two reasons:
-#  1. Fault while reloading DS, ES, FS or GS
-#  2. Fault while executing IRET
-# Category 1 we do not need to fix up as Xen has already reloaded all segment
-# registers that could be reloaded and zeroed the others.
-# Category 2 we fix up by killing the current process. We cannot use the
-# normal Linux return path in this case because if we use the IRET hypercall
-# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
-# We distinguish between categories by comparing each saved segment register
-# with its current contents: any discrepancy means we in category 1.
-*/
+ * Hypervisor uses this for application faults while it executes.
+ * We get here for two reasons:
+ *  1. Fault while reloading DS, ES, FS or GS
+ *  2. Fault while executing IRET
+ * Category 1 we do not need to fix up as Xen has already reloaded all segment
+ * registers that could be reloaded and zeroed the others.
+ * Category 2 we fix up by killing the current process. We cannot use the
+ * normal Linux return path in this case because if we use the IRET hypercall
+ * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
+ * We distinguish between categories by comparing each saved segment register
+ * with its current contents: any discrepancy means we in category 1.
+ */
 ENTRY(xen_failsafe_callback)
-	framesz = (RIP-0x30)	/* workaround buggy gas */
-	_frame framesz
-	CFI_REL_OFFSET rcx, 0
-	CFI_REL_OFFSET r11, 8
+	INTR_FRAME 1 (6*8)
+	/*CFI_REL_OFFSET gs,GS*/
+	/*CFI_REL_OFFSET fs,FS*/
+	/*CFI_REL_OFFSET es,ES*/
+	/*CFI_REL_OFFSET ds,DS*/
+	CFI_REL_OFFSET r11,8
+	CFI_REL_OFFSET rcx,0
 	movw %ds,%cx
 	cmpw %cx,0x10(%rsp)
 	CFI_REMEMBER_STATE
@@ -1429,12 +1350,9 @@ ENTRY(xen_failsafe_callback)
 	CFI_RESTORE r11
 	addq $0x30,%rsp
 	CFI_ADJUST_CFA_OFFSET -0x30
-	pushq $0
-	CFI_ADJUST_CFA_OFFSET 8
-	pushq %r11
-	CFI_ADJUST_CFA_OFFSET 8
-	pushq %rcx
-	CFI_ADJUST_CFA_OFFSET 8
+	pushq_cfi $0	/* RIP */
+	pushq_cfi %r11
+	pushq_cfi %rcx
 	jmp general_protection
 	CFI_RESTORE_STATE
 1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
@@ -1444,11 +1362,223 @@ ENTRY(xen_failsafe_callback)
 	CFI_RESTORE r11
 	addq $0x30,%rsp
 	CFI_ADJUST_CFA_OFFSET -0x30
-	pushq $0
-	CFI_ADJUST_CFA_OFFSET 8
+	pushq_cfi $0
 	SAVE_ALL
 	jmp error_exit
 	CFI_ENDPROC
 END(xen_failsafe_callback)

 #endif /* CONFIG_XEN */
+
+/*
+ * Some functions should be protected against kprobes
+ */
+	.pushsection .kprobes.text, "ax"
+
+paranoidzeroentry_ist debug do_debug DEBUG_STACK
+paranoidzeroentry_ist int3 do_int3 DEBUG_STACK
+paranoiderrorentry stack_segment do_stack_segment
+errorentry general_protection do_general_protection
+errorentry page_fault do_page_fault
+#ifdef CONFIG_X86_MCE
+paranoidzeroentry machine_check do_machine_check
+#endif
+
+	/*
+	 * "Paranoid" exit path from exception stack.
+	 * Paranoid because this is used by NMIs and cannot take
+	 * any kernel state for granted.
+	 * We don't do kernel preemption checks here, because only
+	 * NMI should be common and it does not enable IRQs and
+	 * cannot get reschedule ticks.
+	 *
+	 * "trace" is 0 for the NMI handler only, because irq-tracing
+	 * is fundamentally NMI-unsafe. (we cannot change the soft and
+	 * hard flags at once, atomically)
+	 */
+
+	/* ebx:	no swapgs flag */
+ENTRY(paranoid_exit)
+	INTR_FRAME
+	DISABLE_INTERRUPTS(CLBR_NONE)
+	TRACE_IRQS_OFF
+	testl %ebx,%ebx				/* swapgs needed? */
+	jnz paranoid_restore
+	testl $3,CS(%rsp)
+	jnz paranoid_userspace
+paranoid_swapgs:
+	TRACE_IRQS_IRETQ 0
+	SWAPGS_UNSAFE_STACK
+paranoid_restore:
+	RESTORE_ALL 8
+	jmp irq_return
+paranoid_userspace:
+	GET_THREAD_INFO(%rcx)
+	movl TI_flags(%rcx),%ebx
+	andl $_TIF_WORK_MASK,%ebx
+	jz paranoid_swapgs
+	movq %rsp,%rdi			/* &pt_regs */
+	call sync_regs
+	movq %rax,%rsp			/* switch stack for scheduling */
+	testl $_TIF_NEED_RESCHED,%ebx
+	jnz paranoid_schedule
+	movl %ebx,%edx			/* arg3: thread flags */
+	TRACE_IRQS_ON
+	ENABLE_INTERRUPTS(CLBR_NONE)
+	xorl %esi,%esi			/* arg2: oldset */
+	movq %rsp,%rdi			/* arg1: &pt_regs */
+	call do_notify_resume
+	DISABLE_INTERRUPTS(CLBR_NONE)
+	TRACE_IRQS_OFF
+	jmp paranoid_userspace
+paranoid_schedule:
+	TRACE_IRQS_ON
+	ENABLE_INTERRUPTS(CLBR_ANY)
+	call schedule
+	DISABLE_INTERRUPTS(CLBR_ANY)
+	TRACE_IRQS_OFF
+	jmp paranoid_userspace
+	CFI_ENDPROC
+END(paranoid_exit)
+
+/*
+ * Exception entry point. This expects an error code/orig_rax on the stack.
+ * returns in "no swapgs flag" in %ebx.
+ */
+ENTRY(error_entry)
+	XCPT_FRAME
+	CFI_ADJUST_CFA_OFFSET 15*8
+	/* oldrax contains error code */
+	cld
+	movq_cfi rdi, RDI+8
+	movq_cfi rsi, RSI+8
+	movq_cfi rdx, RDX+8
+	movq_cfi rcx, RCX+8
+	movq_cfi rax, RAX+8
+	movq_cfi r8, R8+8
+	movq_cfi r9, R9+8
+	movq_cfi r10, R10+8
+	movq_cfi r11, R11+8
+	movq_cfi rbx, RBX+8
+	movq_cfi rbp, RBP+8
+	movq_cfi r12, R12+8
+	movq_cfi r13, R13+8
+	movq_cfi r14, R14+8
+	movq_cfi r15, R15+8
+	xorl %ebx,%ebx
+	testl $3,CS+8(%rsp)
+	je error_kernelspace
+error_swapgs:
+	SWAPGS
+error_sti:
+	TRACE_IRQS_OFF
+	ret
+	CFI_ENDPROC
+
+/*
+ * There are two places in the kernel that can potentially fault with
+ * usergs. Handle them here. The exception handlers after iret run with
+ * kernel gs again, so don't set the user space flag. B stepping K8s
+ * sometimes report an truncated RIP for IRET exceptions returning to
+ * compat mode. Check for these here too.
+ */
+error_kernelspace:
+	incl %ebx
+	leaq irq_return(%rip),%rcx
+	cmpq %rcx,RIP+8(%rsp)
+	je error_swapgs
+	movl %ecx,%ecx	/* zero extend */
+	cmpq %rcx,RIP+8(%rsp)
+	je error_swapgs
+	cmpq $gs_change,RIP+8(%rsp)
+	je error_swapgs
+	jmp error_sti
+END(error_entry)
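error_entry and save_paranoid return the same one-bit protocol in %ebx, which paranoid_exit and error_exit test before deciding whether to SWAPGS on the way out: %ebx starts at 0 ("we did SWAPGS on entry, undo it on exit") and is bumped to 1 on the kernel-GS paths. A C sketch of the convention (illustrative only):

/* Sketch of the %ebx "no swapgs" flag convention. */
enum gs_flag { GS_NEEDS_SWAPGS = 0, GS_ALREADY_KERNEL = 1 };

static enum gs_flag entry_gs_flag(int came_from_kernel_gs)
{
	/* error_entry: xorl %ebx,%ebx, then incl %ebx in kernelspace.
	 * save_paranoid: movl $1,%ebx, then xorl %ebx,%ebx after SWAPGS. */
	return came_from_kernel_gs ? GS_ALREADY_KERNEL : GS_NEEDS_SWAPGS;
}

int main(void)
{
	return entry_gs_flag(1) == GS_ALREADY_KERNEL ? 0 : 1;
}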
+
+
+/* ebx:	no swapgs flag (1: don't need swapgs, 0: need it) */
+ENTRY(error_exit)
+	DEFAULT_FRAME
+	movl %ebx,%eax
+	RESTORE_REST
+	DISABLE_INTERRUPTS(CLBR_NONE)
+	TRACE_IRQS_OFF
+	GET_THREAD_INFO(%rcx)
+	testl %eax,%eax
+	jne retint_kernel
+	LOCKDEP_SYS_EXIT_IRQ
+	movl TI_flags(%rcx),%edx
+	movl $_TIF_WORK_MASK,%edi
+	andl %edi,%edx
+	jnz retint_careful
+	jmp retint_swapgs
+	CFI_ENDPROC
+END(error_exit)
+
+
+	/* runs on exception stack */
+ENTRY(nmi)
+	INTR_FRAME
+	PARAVIRT_ADJUST_EXCEPTION_FRAME
+	pushq_cfi $-1
+	subq $15*8, %rsp
+	CFI_ADJUST_CFA_OFFSET 15*8
+	call save_paranoid
+	DEFAULT_FRAME 0
+	/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+	movq %rsp,%rdi
+	movq $-1,%rsi
+	call do_nmi
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* paranoidexit; without TRACE_IRQS_OFF */
+	/* ebx: no swapgs flag */
+	DISABLE_INTERRUPTS(CLBR_NONE)
+	testl %ebx,%ebx				/* swapgs needed? */
+	jnz nmi_restore
+	testl $3,CS(%rsp)
+	jnz nmi_userspace
+nmi_swapgs:
+	SWAPGS_UNSAFE_STACK
+nmi_restore:
+	RESTORE_ALL 8
+	jmp irq_return
+nmi_userspace:
+	GET_THREAD_INFO(%rcx)
+	movl TI_flags(%rcx),%ebx
+	andl $_TIF_WORK_MASK,%ebx
+	jz nmi_swapgs
+	movq %rsp,%rdi			/* &pt_regs */
+	call sync_regs
+	movq %rax,%rsp			/* switch stack for scheduling */
+	testl $_TIF_NEED_RESCHED,%ebx
+	jnz nmi_schedule
+	movl %ebx,%edx			/* arg3: thread flags */
+	ENABLE_INTERRUPTS(CLBR_NONE)
+	xorl %esi,%esi			/* arg2: oldset */
+	movq %rsp,%rdi			/* arg1: &pt_regs */
+	call do_notify_resume
+	DISABLE_INTERRUPTS(CLBR_NONE)
+	jmp nmi_userspace
+nmi_schedule:
+	ENABLE_INTERRUPTS(CLBR_ANY)
+	call schedule
+	DISABLE_INTERRUPTS(CLBR_ANY)
+	jmp nmi_userspace
+	CFI_ENDPROC
+#else
+	jmp paranoid_exit
+	CFI_ENDPROC
+#endif
+END(nmi)
+
+ENTRY(ignore_sysret)
+	CFI_STARTPROC
+	mov $-ENOSYS,%eax
+	sysret
+	CFI_ENDPROC
+END(ignore_sysret)
+
+/*
+ * End of kprobes section
+ */
+	.popsection