about summary refs log tree commit diff stats
path: root/arch/x86/kernel/entry_64.S
diff options
context:
space:
mode:
authorKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2012-10-19 15:19:19 -0400
committerKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2012-10-19 15:19:19 -0400
commite05dacd71db0a5da7c1a44bcaab2a8a240b9c233 (patch)
tree31382cf1c7d62c03126448affb2fc86e8c4aaa8b /arch/x86/kernel/entry_64.S
parent3ab0b83bf6a1e834f4b884150d8012990c75d25d (diff)
parentddffeb8c4d0331609ef2581d84de4d763607bd37 (diff)
Merge commit 'v3.7-rc1' into stable/for-linus-3.7
* commit 'v3.7-rc1': (10892 commits) Linux 3.7-rc1 x86, boot: Explicitly include autoconf.h for hostprogs perf: Fix UAPI fallout ARM: config: make sure that platforms are ordered by option string ARM: config: sort select statements alphanumerically UAPI: (Scripted) Disintegrate include/linux/byteorder UAPI: (Scripted) Disintegrate include/linux UAPI: Unexport linux/blk_types.h UAPI: Unexport part of linux/ppp-comp.h perf: Handle new rbtree implementation procfs: don't need a PATH_MAX allocation to hold a string representation of an int vfs: embed struct filename inside of names_cache allocation if possible audit: make audit_inode take struct filename vfs: make path_openat take a struct filename pointer vfs: turn do_path_lookup into wrapper around struct filename variant audit: allow audit code to satisfy getname requests from its names_list vfs: define struct filename and have getname() return it btrfs: Fix compilation with user namespace support enabled userns: Fix posix_acl_file_xattr_userns gid conversion userns: Properly print bluetooth socket uids ...
Diffstat (limited to 'arch/x86/kernel/entry_64.S')
-rw-r--r--  arch/x86/kernel/entry_64.S  232
1 file changed, 152 insertions, 80 deletions
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index dcdd0ea33a32..b51b2c7ee51f 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -56,6 +56,8 @@
56#include <asm/ftrace.h> 56#include <asm/ftrace.h>
57#include <asm/percpu.h> 57#include <asm/percpu.h>
58#include <asm/asm.h> 58#include <asm/asm.h>
59#include <asm/rcu.h>
60#include <asm/smap.h>
59#include <linux/err.h> 61#include <linux/err.h>
60 62
61/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ 63/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
@@ -68,25 +70,51 @@
68 .section .entry.text, "ax" 70 .section .entry.text, "ax"
69 71
70#ifdef CONFIG_FUNCTION_TRACER 72#ifdef CONFIG_FUNCTION_TRACER
73
74#ifdef CC_USING_FENTRY
75# define function_hook __fentry__
76#else
77# define function_hook mcount
78#endif
79
71#ifdef CONFIG_DYNAMIC_FTRACE 80#ifdef CONFIG_DYNAMIC_FTRACE
72ENTRY(mcount) 81
82ENTRY(function_hook)
73 retq 83 retq
74END(mcount) 84END(function_hook)
85
86/* skip is set if stack has been adjusted */
87.macro ftrace_caller_setup skip=0
88 MCOUNT_SAVE_FRAME \skip
89
90 /* Load the ftrace_ops into the 3rd parameter */
91 leaq function_trace_op, %rdx
92
93 /* Load ip into the first parameter */
94 movq RIP(%rsp), %rdi
95 subq $MCOUNT_INSN_SIZE, %rdi
96 /* Load the parent_ip into the second parameter */
97#ifdef CC_USING_FENTRY
98 movq SS+16(%rsp), %rsi
99#else
100 movq 8(%rbp), %rsi
101#endif
102.endm
75 103
76ENTRY(ftrace_caller) 104ENTRY(ftrace_caller)
105 /* Check if tracing was disabled (quick check) */
77 cmpl $0, function_trace_stop 106 cmpl $0, function_trace_stop
78 jne ftrace_stub 107 jne ftrace_stub
79 108
80 MCOUNT_SAVE_FRAME 109 ftrace_caller_setup
81 110 /* regs go into 4th parameter (but make it NULL) */
82 movq 0x38(%rsp), %rdi 111 movq $0, %rcx
83 movq 8(%rbp), %rsi
84 subq $MCOUNT_INSN_SIZE, %rdi
85 112
86GLOBAL(ftrace_call) 113GLOBAL(ftrace_call)
87 call ftrace_stub 114 call ftrace_stub
88 115
89 MCOUNT_RESTORE_FRAME 116 MCOUNT_RESTORE_FRAME
117ftrace_return:
90 118
91#ifdef CONFIG_FUNCTION_GRAPH_TRACER 119#ifdef CONFIG_FUNCTION_GRAPH_TRACER
92GLOBAL(ftrace_graph_call) 120GLOBAL(ftrace_graph_call)
@@ -97,8 +125,78 @@ GLOBAL(ftrace_stub)
97 retq 125 retq
98END(ftrace_caller) 126END(ftrace_caller)
99 127
128ENTRY(ftrace_regs_caller)
129 /* Save the current flags before compare (in SS location)*/
130 pushfq
131
132 /* Check if tracing was disabled (quick check) */
133 cmpl $0, function_trace_stop
134 jne ftrace_restore_flags
135
136 /* skip=8 to skip flags saved in SS */
137 ftrace_caller_setup 8
138
139 /* Save the rest of pt_regs */
140 movq %r15, R15(%rsp)
141 movq %r14, R14(%rsp)
142 movq %r13, R13(%rsp)
143 movq %r12, R12(%rsp)
144 movq %r11, R11(%rsp)
145 movq %r10, R10(%rsp)
146 movq %rbp, RBP(%rsp)
147 movq %rbx, RBX(%rsp)
148 /* Copy saved flags */
149 movq SS(%rsp), %rcx
150 movq %rcx, EFLAGS(%rsp)
151 /* Kernel segments */
152 movq $__KERNEL_DS, %rcx
153 movq %rcx, SS(%rsp)
154 movq $__KERNEL_CS, %rcx
155 movq %rcx, CS(%rsp)
156 /* Stack - skipping return address */
157 leaq SS+16(%rsp), %rcx
158 movq %rcx, RSP(%rsp)
159
160 /* regs go into 4th parameter */
161 leaq (%rsp), %rcx
162
163GLOBAL(ftrace_regs_call)
164 call ftrace_stub
165
166 /* Copy flags back to SS, to restore them */
167 movq EFLAGS(%rsp), %rax
168 movq %rax, SS(%rsp)
169
170 /* Handlers can change the RIP */
171 movq RIP(%rsp), %rax
172 movq %rax, SS+8(%rsp)
173
174 /* restore the rest of pt_regs */
175 movq R15(%rsp), %r15
176 movq R14(%rsp), %r14
177 movq R13(%rsp), %r13
178 movq R12(%rsp), %r12
179 movq R10(%rsp), %r10
180 movq RBP(%rsp), %rbp
181 movq RBX(%rsp), %rbx
182
183 /* skip=8 to skip flags saved in SS */
184 MCOUNT_RESTORE_FRAME 8
185
186 /* Restore flags */
187 popfq
188
189 jmp ftrace_return
190ftrace_restore_flags:
191 popfq
192 jmp ftrace_stub
193
194END(ftrace_regs_caller)
195
196
100#else /* ! CONFIG_DYNAMIC_FTRACE */ 197#else /* ! CONFIG_DYNAMIC_FTRACE */
101ENTRY(mcount) 198
199ENTRY(function_hook)
102 cmpl $0, function_trace_stop 200 cmpl $0, function_trace_stop
103 jne ftrace_stub 201 jne ftrace_stub
104 202
@@ -119,8 +217,12 @@ GLOBAL(ftrace_stub)
119trace: 217trace:
120 MCOUNT_SAVE_FRAME 218 MCOUNT_SAVE_FRAME
121 219
122 movq 0x38(%rsp), %rdi 220 movq RIP(%rsp), %rdi
221#ifdef CC_USING_FENTRY
222 movq SS+16(%rsp), %rsi
223#else
123 movq 8(%rbp), %rsi 224 movq 8(%rbp), %rsi
225#endif
124 subq $MCOUNT_INSN_SIZE, %rdi 226 subq $MCOUNT_INSN_SIZE, %rdi
125 227
126 call *ftrace_trace_function 228 call *ftrace_trace_function
@@ -128,20 +230,22 @@ trace:
128 MCOUNT_RESTORE_FRAME 230 MCOUNT_RESTORE_FRAME
129 231
130 jmp ftrace_stub 232 jmp ftrace_stub
131END(mcount) 233END(function_hook)
132#endif /* CONFIG_DYNAMIC_FTRACE */ 234#endif /* CONFIG_DYNAMIC_FTRACE */
133#endif /* CONFIG_FUNCTION_TRACER */ 235#endif /* CONFIG_FUNCTION_TRACER */
134 236
135#ifdef CONFIG_FUNCTION_GRAPH_TRACER 237#ifdef CONFIG_FUNCTION_GRAPH_TRACER
136ENTRY(ftrace_graph_caller) 238ENTRY(ftrace_graph_caller)
137 cmpl $0, function_trace_stop
138 jne ftrace_stub
139
140 MCOUNT_SAVE_FRAME 239 MCOUNT_SAVE_FRAME
141 240
241#ifdef CC_USING_FENTRY
242 leaq SS+16(%rsp), %rdi
243 movq $0, %rdx /* No framepointers needed */
244#else
142 leaq 8(%rbp), %rdi 245 leaq 8(%rbp), %rdi
143 movq 0x38(%rsp), %rsi
144 movq (%rbp), %rdx 246 movq (%rbp), %rdx
247#endif
248 movq RIP(%rsp), %rsi
145 subq $MCOUNT_INSN_SIZE, %rsi 249 subq $MCOUNT_INSN_SIZE, %rsi
146 250
147 call prepare_ftrace_return 251 call prepare_ftrace_return
@@ -342,15 +446,15 @@ ENDPROC(native_usergs_sysret64)
342 .macro SAVE_ARGS_IRQ 446 .macro SAVE_ARGS_IRQ
343 cld 447 cld
344 /* start from rbp in pt_regs and jump over */ 448 /* start from rbp in pt_regs and jump over */
345 movq_cfi rdi, RDI-RBP 449 movq_cfi rdi, (RDI-RBP)
346 movq_cfi rsi, RSI-RBP 450 movq_cfi rsi, (RSI-RBP)
347 movq_cfi rdx, RDX-RBP 451 movq_cfi rdx, (RDX-RBP)
348 movq_cfi rcx, RCX-RBP 452 movq_cfi rcx, (RCX-RBP)
349 movq_cfi rax, RAX-RBP 453 movq_cfi rax, (RAX-RBP)
350 movq_cfi r8, R8-RBP 454 movq_cfi r8, (R8-RBP)
351 movq_cfi r9, R9-RBP 455 movq_cfi r9, (R9-RBP)
352 movq_cfi r10, R10-RBP 456 movq_cfi r10, (R10-RBP)
353 movq_cfi r11, R11-RBP 457 movq_cfi r11, (R11-RBP)
354 458
355 /* Save rbp so that we can unwind from get_irq_regs() */ 459 /* Save rbp so that we can unwind from get_irq_regs() */
356 movq_cfi rbp, 0 460 movq_cfi rbp, 0
@@ -384,7 +488,7 @@ ENDPROC(native_usergs_sysret64)
384 .endm 488 .endm
385 489
386ENTRY(save_rest) 490ENTRY(save_rest)
387 PARTIAL_FRAME 1 REST_SKIP+8 491 PARTIAL_FRAME 1 (REST_SKIP+8)
388 movq 5*8+16(%rsp), %r11 /* save return address */ 492 movq 5*8+16(%rsp), %r11 /* save return address */
389 movq_cfi rbx, RBX+16 493 movq_cfi rbx, RBX+16
390 movq_cfi rbp, RBP+16 494 movq_cfi rbp, RBP+16
@@ -440,7 +544,7 @@ ENTRY(ret_from_fork)
440 544
441 LOCK ; btr $TIF_FORK,TI_flags(%r8) 545 LOCK ; btr $TIF_FORK,TI_flags(%r8)
442 546
443 pushq_cfi kernel_eflags(%rip) 547 pushq_cfi $0x0002
444 popfq_cfi # reset kernel eflags 548 popfq_cfi # reset kernel eflags
445 549
446 call schedule_tail # rdi: 'prev' task parameter 550 call schedule_tail # rdi: 'prev' task parameter
@@ -450,7 +554,7 @@ ENTRY(ret_from_fork)
450 RESTORE_REST 554 RESTORE_REST
451 555
452 testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? 556 testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
453 jz retint_restore_args 557 jz 1f
454 558
455 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET 559 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
456 jnz int_ret_from_sys_call 560 jnz int_ret_from_sys_call
@@ -458,6 +562,14 @@ ENTRY(ret_from_fork)
458 RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET 562 RESTORE_TOP_OF_STACK %rdi, -ARGOFFSET
459 jmp ret_from_sys_call # go to the SYSRET fastpath 563 jmp ret_from_sys_call # go to the SYSRET fastpath
460 564
5651:
566 subq $REST_SKIP, %rsp # leave space for volatiles
567 CFI_ADJUST_CFA_OFFSET REST_SKIP
568 movq %rbp, %rdi
569 call *%rbx
570 movl $0, RAX(%rsp)
571 RESTORE_REST
572 jmp int_ret_from_sys_call
461 CFI_ENDPROC 573 CFI_ENDPROC
462END(ret_from_fork) 574END(ret_from_fork)
463 575
@@ -465,7 +577,8 @@ END(ret_from_fork)
465 * System call entry. Up to 6 arguments in registers are supported. 577 * System call entry. Up to 6 arguments in registers are supported.
466 * 578 *
467 * SYSCALL does not save anything on the stack and does not change the 579 * SYSCALL does not save anything on the stack and does not change the
468 * stack pointer. 580 * stack pointer. However, it does mask the flags register for us, so
581 * CLD and CLAC are not needed.
469 */ 582 */
470 583
471/* 584/*
@@ -565,7 +678,7 @@ sysret_careful:
565 TRACE_IRQS_ON 678 TRACE_IRQS_ON
566 ENABLE_INTERRUPTS(CLBR_NONE) 679 ENABLE_INTERRUPTS(CLBR_NONE)
567 pushq_cfi %rdi 680 pushq_cfi %rdi
568 call schedule 681 SCHEDULE_USER
569 popq_cfi %rdi 682 popq_cfi %rdi
570 jmp sysret_check 683 jmp sysret_check
571 684
@@ -678,7 +791,7 @@ int_careful:
678 TRACE_IRQS_ON 791 TRACE_IRQS_ON
679 ENABLE_INTERRUPTS(CLBR_NONE) 792 ENABLE_INTERRUPTS(CLBR_NONE)
680 pushq_cfi %rdi 793 pushq_cfi %rdi
681 call schedule 794 SCHEDULE_USER
682 popq_cfi %rdi 795 popq_cfi %rdi
683 DISABLE_INTERRUPTS(CLBR_NONE) 796 DISABLE_INTERRUPTS(CLBR_NONE)
684 TRACE_IRQS_OFF 797 TRACE_IRQS_OFF
@@ -757,7 +870,6 @@ ENTRY(stub_execve)
757 PARTIAL_FRAME 0 870 PARTIAL_FRAME 0
758 SAVE_REST 871 SAVE_REST
759 FIXUP_TOP_OF_STACK %r11 872 FIXUP_TOP_OF_STACK %r11
760 movq %rsp, %rcx
761 call sys_execve 873 call sys_execve
762 RESTORE_TOP_OF_STACK %r11 874 RESTORE_TOP_OF_STACK %r11
763 movq %rax,RAX(%rsp) 875 movq %rax,RAX(%rsp)
@@ -807,8 +919,7 @@ ENTRY(stub_x32_execve)
807 PARTIAL_FRAME 0 919 PARTIAL_FRAME 0
808 SAVE_REST 920 SAVE_REST
809 FIXUP_TOP_OF_STACK %r11 921 FIXUP_TOP_OF_STACK %r11
810 movq %rsp, %rcx 922 call compat_sys_execve
811 call sys32_execve
812 RESTORE_TOP_OF_STACK %r11 923 RESTORE_TOP_OF_STACK %r11
813 movq %rax,RAX(%rsp) 924 movq %rax,RAX(%rsp)
814 RESTORE_REST 925 RESTORE_REST
@@ -884,6 +995,7 @@ END(interrupt)
884 */ 995 */
885 .p2align CONFIG_X86_L1_CACHE_SHIFT 996 .p2align CONFIG_X86_L1_CACHE_SHIFT
886common_interrupt: 997common_interrupt:
998 ASM_CLAC
887 XCPT_FRAME 999 XCPT_FRAME
888 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */ 1000 addq $-0x80,(%rsp) /* Adjust vector to [-256,-1] range */
889 interrupt do_IRQ 1001 interrupt do_IRQ
@@ -974,7 +1086,7 @@ retint_careful:
974 TRACE_IRQS_ON 1086 TRACE_IRQS_ON
975 ENABLE_INTERRUPTS(CLBR_NONE) 1087 ENABLE_INTERRUPTS(CLBR_NONE)
976 pushq_cfi %rdi 1088 pushq_cfi %rdi
977 call schedule 1089 SCHEDULE_USER
978 popq_cfi %rdi 1090 popq_cfi %rdi
979 GET_THREAD_INFO(%rcx) 1091 GET_THREAD_INFO(%rcx)
980 DISABLE_INTERRUPTS(CLBR_NONE) 1092 DISABLE_INTERRUPTS(CLBR_NONE)
@@ -1023,6 +1135,7 @@ END(common_interrupt)
1023 */ 1135 */
1024.macro apicinterrupt num sym do_sym 1136.macro apicinterrupt num sym do_sym
1025ENTRY(\sym) 1137ENTRY(\sym)
1138 ASM_CLAC
1026 INTR_FRAME 1139 INTR_FRAME
1027 pushq_cfi $~(\num) 1140 pushq_cfi $~(\num)
1028.Lcommon_\sym: 1141.Lcommon_\sym:
@@ -1077,6 +1190,7 @@ apicinterrupt IRQ_WORK_VECTOR \
1077 */ 1190 */
1078.macro zeroentry sym do_sym 1191.macro zeroentry sym do_sym
1079ENTRY(\sym) 1192ENTRY(\sym)
1193 ASM_CLAC
1080 INTR_FRAME 1194 INTR_FRAME
1081 PARAVIRT_ADJUST_EXCEPTION_FRAME 1195 PARAVIRT_ADJUST_EXCEPTION_FRAME
1082 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ 1196 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
@@ -1094,6 +1208,7 @@ END(\sym)
1094 1208
1095.macro paranoidzeroentry sym do_sym 1209.macro paranoidzeroentry sym do_sym
1096ENTRY(\sym) 1210ENTRY(\sym)
1211 ASM_CLAC
1097 INTR_FRAME 1212 INTR_FRAME
1098 PARAVIRT_ADJUST_EXCEPTION_FRAME 1213 PARAVIRT_ADJUST_EXCEPTION_FRAME
1099 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ 1214 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
@@ -1112,6 +1227,7 @@ END(\sym)
1112#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8) 1227#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
1113.macro paranoidzeroentry_ist sym do_sym ist 1228.macro paranoidzeroentry_ist sym do_sym ist
1114ENTRY(\sym) 1229ENTRY(\sym)
1230 ASM_CLAC
1115 INTR_FRAME 1231 INTR_FRAME
1116 PARAVIRT_ADJUST_EXCEPTION_FRAME 1232 PARAVIRT_ADJUST_EXCEPTION_FRAME
1117 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */ 1233 pushq_cfi $-1 /* ORIG_RAX: no syscall to restart */
@@ -1131,6 +1247,7 @@ END(\sym)
1131 1247
1132.macro errorentry sym do_sym 1248.macro errorentry sym do_sym
1133ENTRY(\sym) 1249ENTRY(\sym)
1250 ASM_CLAC
1134 XCPT_FRAME 1251 XCPT_FRAME
1135 PARAVIRT_ADJUST_EXCEPTION_FRAME 1252 PARAVIRT_ADJUST_EXCEPTION_FRAME
1136 subq $ORIG_RAX-R15, %rsp 1253 subq $ORIG_RAX-R15, %rsp
@@ -1149,6 +1266,7 @@ END(\sym)
1149 /* error code is on the stack already */ 1266 /* error code is on the stack already */
1150.macro paranoiderrorentry sym do_sym 1267.macro paranoiderrorentry sym do_sym
1151ENTRY(\sym) 1268ENTRY(\sym)
1269 ASM_CLAC
1152 XCPT_FRAME 1270 XCPT_FRAME
1153 PARAVIRT_ADJUST_EXCEPTION_FRAME 1271 PARAVIRT_ADJUST_EXCEPTION_FRAME
1154 subq $ORIG_RAX-R15, %rsp 1272 subq $ORIG_RAX-R15, %rsp
@@ -1206,52 +1324,6 @@ bad_gs:
1206 jmp 2b 1324 jmp 2b
1207 .previous 1325 .previous
1208 1326
1209ENTRY(kernel_thread_helper)
1210 pushq $0 # fake return address
1211 CFI_STARTPROC
1212 /*
1213 * Here we are in the child and the registers are set as they were
1214 * at kernel_thread() invocation in the parent.
1215 */
1216 call *%rsi
1217 # exit
1218 mov %eax, %edi
1219 call do_exit
1220 ud2 # padding for call trace
1221 CFI_ENDPROC
1222END(kernel_thread_helper)
1223
1224/*
1225 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
1226 *
1227 * C extern interface:
1228 * extern long execve(const char *name, char **argv, char **envp)
1229 *
1230 * asm input arguments:
1231 * rdi: name, rsi: argv, rdx: envp
1232 *
1233 * We want to fallback into:
1234 * extern long sys_execve(const char *name, char **argv,char **envp, struct pt_regs *regs)
1235 *
1236 * do_sys_execve asm fallback arguments:
1237 * rdi: name, rsi: argv, rdx: envp, rcx: fake frame on the stack
1238 */
1239ENTRY(kernel_execve)
1240 CFI_STARTPROC
1241 FAKE_STACK_FRAME $0
1242 SAVE_ALL
1243 movq %rsp,%rcx
1244 call sys_execve
1245 movq %rax, RAX(%rsp)
1246 RESTORE_REST
1247 testq %rax,%rax
1248 je int_ret_from_sys_call
1249 RESTORE_ARGS
1250 UNFAKE_STACK_FRAME
1251 ret
1252 CFI_ENDPROC
1253END(kernel_execve)
1254
1255/* Call softirq on interrupt stack. Interrupts are off. */ 1327/* Call softirq on interrupt stack. Interrupts are off. */
1256ENTRY(call_softirq) 1328ENTRY(call_softirq)
1257 CFI_STARTPROC 1329 CFI_STARTPROC
@@ -1449,7 +1521,7 @@ paranoid_userspace:
1449paranoid_schedule: 1521paranoid_schedule:
1450 TRACE_IRQS_ON 1522 TRACE_IRQS_ON
1451 ENABLE_INTERRUPTS(CLBR_ANY) 1523 ENABLE_INTERRUPTS(CLBR_ANY)
1452 call schedule 1524 SCHEDULE_USER
1453 DISABLE_INTERRUPTS(CLBR_ANY) 1525 DISABLE_INTERRUPTS(CLBR_ANY)
1454 TRACE_IRQS_OFF 1526 TRACE_IRQS_OFF
1455 jmp paranoid_userspace 1527 jmp paranoid_userspace