author     Andy Lutomirski <luto@kernel.org>    2015-11-12 15:59:04 -0500
committer  Ingo Molnar <mingo@kernel.org>       2015-11-24 03:56:44 -0500
commit     478dc89cf316697e8029411a64ea2b30c528434d
tree       f2d9b65ba366429ab2f8932e01d05a96d4efe492
parent     2671c3e4fe2a34bd9bf2eecdf5d1149d4b55dbdf
x86/entry/64: Bypass enter_from_user_mode on non-context-tracking boots
On CONFIG_CONTEXT_TRACKING kernels that have context tracking
disabled at runtime (which includes most distro kernels), we still
pay the overhead of a call to enter_from_user_mode in interrupt
and exception entries.

If jump labels are available, use the jump label infrastructure
to skip the call.
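
For reference, here is a C-side sketch of the same pattern (illustration
only, not part of this patch: the helper name is made up, and it assumes
the context_tracking_enabled static key and enter_from_user_mode() are
visible at the call site):

  #include <linux/context_tracking.h>
  #include <linux/jump_label.h>

  /*
   * Hypothetical helper mirroring the new CALL_enter_from_user_mode
   * asm macro: static_key_false() behaves as a patched no-op branch
   * until the context_tracking_enabled key is switched on, so the
   * call is skipped entirely on non-context-tracking boots.
   */
  static inline void call_enter_from_user_mode_sketch(void)
  {
  #ifdef CONFIG_CONTEXT_TRACKING
  	if (static_key_false(&context_tracking_enabled))
  		enter_from_user_mode();
  #endif
  }

The asm macro added below does the same check with STATIC_JUMP_IF_FALSE,
so the interrupt and exception entry paths do not even set up the call
when context tracking is off.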
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/73ee804fff48cd8c66b65b724f9f728a11a8c686.1447361906.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  arch/x86/entry/calling.h  | 15 +++++++++++++++
-rw-r--r--  arch/x86/entry/entry_64.S |  8 ++------
2 files changed, 17 insertions(+), 6 deletions(-)
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 3c71dd947c7b..e32206e09868 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -1,3 +1,5 @@
+#include <linux/jump_label.h>
+
 /*
 
  x86 function call convention, 64-bit:
@@ -232,3 +234,16 @@ For 32-bit we have the following conventions - kernel is built with
 
 #endif /* CONFIG_X86_64 */
 
+/*
+ * This does 'call enter_from_user_mode' unless we can avoid it based on
+ * kernel config or using the static jump infrastructure.
+ */
+.macro CALL_enter_from_user_mode
+#ifdef CONFIG_CONTEXT_TRACKING
+#ifdef HAVE_JUMP_LABEL
+	STATIC_JUMP_IF_FALSE .Lafter_call_\@, context_tracking_enabled, def=0
+#endif
+	call enter_from_user_mode
+.Lafter_call_\@:
+#endif
+.endm
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index a55697d19824..9d34d3cfceb6 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -520,9 +520,7 @@ END(irq_entries_start)
 	 */
 	TRACE_IRQS_OFF
 
-#ifdef CONFIG_CONTEXT_TRACKING
-	call enter_from_user_mode
-#endif
+	CALL_enter_from_user_mode
 
 1:
 	/*
@@ -1066,9 +1064,7 @@ ENTRY(error_entry)
 	 * (which can take locks).
 	 */
 	TRACE_IRQS_OFF
-#ifdef CONFIG_CONTEXT_TRACKING
-	call enter_from_user_mode
-#endif
+	CALL_enter_from_user_mode
 	ret
 
 .Lerror_entry_done: