Diffstat (limited to 'arch/x86/kernel')
 arch/x86/kernel/cpu/mcheck/mce.c |  2 +-
 arch/x86/kernel/jump_label.c     | 62 ++++++++++++-----------------
 arch/x86/kernel/kprobes/core.c   | 38 -------------
 arch/x86/kernel/macros.S         | 16 ++++++
 arch/x86/kernel/module.c         |  6 ++
 arch/x86/kernel/traps.c          | 16 +++++-
 6 files changed, 61 insertions(+), 79 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 953b3ce92dcc..ef8fd1f2ede0 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1315,7 +1315,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
 		local_irq_disable();
 		ist_end_non_atomic();
 	} else {
-		if (!fixup_exception(regs, X86_TRAP_MC))
+		if (!fixup_exception(regs, X86_TRAP_MC, error_code, 0))
 			mce_panic("Failed kernel mode recovery", &m, NULL);
 	}
 
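
The change above reflects a new fixup_exception() signature: callers now pass the error code and, where one exists, the faulting address down to the exception-table handlers. Non-page-fault paths such as #MC have no fault address, hence the trailing 0. A minimal sketch of the receiving side, assuming the extable helper naming below (treat the exact signatures as illustrative rather than authoritative):

int fixup_exception(struct pt_regs *regs, int trapnr,
		    unsigned long error_code, unsigned long fault_addr)
{
	const struct exception_table_entry *e;

	e = search_exception_tables(regs->ip);
	if (!e)
		return 0;

	/* Per-entry handlers can now see the error code and fault address. */
	return ex_fixup_handler(e)(e, regs, trapnr, error_code, fault_addr);
}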
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index eeea935e9bb5..aac0c1f7e354 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -42,55 +42,40 @@ static void __ref __jump_label_transform(struct jump_entry *entry,
 					 void *(*poker)(void *, const void *, size_t),
 					 int init)
 {
-	union jump_code_union code;
+	union jump_code_union jmp;
 	const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
 	const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
+	const void *expect, *code;
+	int line;
+
+	jmp.jump = 0xe9;
+	jmp.offset = jump_entry_target(entry) -
+		     (jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
 
 	if (early_boot_irqs_disabled)
 		poker = text_poke_early;
 
 	if (type == JUMP_LABEL_JMP) {
 		if (init) {
-			/*
-			 * Jump label is enabled for the first time.
-			 * So we expect a default_nop...
-			 */
-			if (unlikely(memcmp((void *)entry->code, default_nop, 5)
-				     != 0))
-				bug_at((void *)entry->code, __LINE__);
+			expect = default_nop; line = __LINE__;
 		} else {
-			/*
-			 * ...otherwise expect an ideal_nop. Otherwise
-			 * something went horribly wrong.
-			 */
-			if (unlikely(memcmp((void *)entry->code, ideal_nop, 5)
-				     != 0))
-				bug_at((void *)entry->code, __LINE__);
+			expect = ideal_nop; line = __LINE__;
 		}
 
-		code.jump = 0xe9;
-		code.offset = entry->target -
-				(entry->code + JUMP_LABEL_NOP_SIZE);
+		code = &jmp.code;
 	} else {
-		/*
-		 * We are disabling this jump label. If it is not what
-		 * we think it is, then something must have gone wrong.
-		 * If this is the first initialization call, then we
-		 * are converting the default nop to the ideal nop.
-		 */
 		if (init) {
-			if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
-				bug_at((void *)entry->code, __LINE__);
+			expect = default_nop; line = __LINE__;
 		} else {
-			code.jump = 0xe9;
-			code.offset = entry->target -
-				(entry->code + JUMP_LABEL_NOP_SIZE);
-			if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
-				bug_at((void *)entry->code, __LINE__);
+			expect = &jmp.code; line = __LINE__;
 		}
-		memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
+
+		code = ideal_nop;
 	}
 
+	if (memcmp((void *)jump_entry_code(entry), expect, JUMP_LABEL_NOP_SIZE))
+		bug_at((void *)jump_entry_code(entry), line);
+
 	/*
 	 * Make text_poke_bp() a default fallback poker.
 	 *
@@ -99,11 +84,14 @@ static void __ref __jump_label_transform(struct jump_entry *entry,
 	 * always nop being the 'currently valid' instruction
 	 *
 	 */
-	if (poker)
-		(*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
-	else
-		text_poke_bp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE,
-			     (void *)entry->code + JUMP_LABEL_NOP_SIZE);
+	if (poker) {
+		(*poker)((void *)jump_entry_code(entry), code,
+			 JUMP_LABEL_NOP_SIZE);
+		return;
+	}
+
+	text_poke_bp((void *)jump_entry_code(entry), code, JUMP_LABEL_NOP_SIZE,
+		     (void *)jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);
 }
 
 void arch_jump_label_transform(struct jump_entry *entry,
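
For context, the 5-byte sequence being patched is either an atomic 5-byte NOP or a JMP rel32: opcode 0xe9 followed by a 32-bit displacement relative to the end of the instruction, i.e. offset = target - (code + JUMP_LABEL_NOP_SIZE). The union in this file lets the code build the JMP field by field yet compare and poke it as raw bytes; roughly:

union jump_code_union {
	char code[JUMP_LABEL_NOP_SIZE];	/* raw bytes for memcmp()/poking */
	struct {
		char jump;		/* 0xe9: JMP rel32 opcode */
		int offset;		/* rel32 displacement */
	} __attribute__((packed));
};

The rewrite builds the expected JMP once up front, records which byte pattern must currently be present (expect) together with the __LINE__ for the bug_at() report, and then performs a single memcmp() in place of the four open-coded ones.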
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index b0d1e81c96bb..f72a47b602e2 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1020,50 +1020,12 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 		 */
 		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
 			return 1;
-
-		/*
-		 * In case the user-specified fault handler returned
-		 * zero, try to fix up.
-		 */
-		if (fixup_exception(regs, trapnr))
-			return 1;
-
-		/*
-		 * fixup routine could not handle it,
-		 * Let do_page_fault() fix it.
-		 */
 	}
 
 	return 0;
 }
 NOKPROBE_SYMBOL(kprobe_fault_handler);
 
-/*
- * Wrapper routine for handling exceptions.
- */
-int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
-			     void *data)
-{
-	struct die_args *args = data;
-	int ret = NOTIFY_DONE;
-
-	if (args->regs && user_mode(args->regs))
-		return ret;
-
-	if (val == DIE_GPF) {
-		/*
-		 * To be potentially processing a kprobe fault and to
-		 * trust the result from kprobe_running(), we have
-		 * be non-preemptible.
-		 */
-		if (!preemptible() && kprobe_running() &&
-		    kprobe_fault_handler(args->regs, args->trapnr))
-			ret = NOTIFY_STOP;
-	}
-	return ret;
-}
-NOKPROBE_SYMBOL(kprobe_exceptions_notify);
-
 bool arch_within_kprobe_blacklist(unsigned long addr)
 {
 	bool is_in_entry_trampoline_section = false;
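
kprobe_exceptions_notify() was the kprobes entry point on the die notifier chain for #GP. With this series the trap handler calls into kprobes directly (see the traps.c hunk further down), so the notifier round trip disappears. A hypothetical condensation of the resulting control flow (the helper name is made up):

/* Not the kernel's code: the new #GP path, condensed. */
static int try_kprobe_gp_fixup(struct pt_regs *regs)
{
	/* kprobe_running() is only trustworthy while non-preemptible. */
	if (!preemptible() && kprobe_running() &&
	    kprobe_fault_handler(regs, X86_TRAP_GP))
		return 1;	/* consumed by the kprobe fault path */

	return 0;		/* fall through to notify_die()/die() */
}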
diff --git a/arch/x86/kernel/macros.S b/arch/x86/kernel/macros.S
new file mode 100644
index 000000000000..161c95059044
--- /dev/null
+++ b/arch/x86/kernel/macros.S
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file includes headers whose assembly part includes macros which are
+ * commonly used. The macros are precompiled into an assembly file which is
+ * later assembled together with each compiled file.
+ */
+
+#include <linux/compiler.h>
+#include <asm/refcount.h>
+#include <asm/alternative-asm.h>
+#include <asm/bug.h>
+#include <asm/paravirt.h>
+#include <asm/asm.h>
+#include <asm/cpufeature.h>
+#include <asm/jump_label.h>
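
macros.S exists so that assembler macros defined in these headers become visible to the assembler for every translation unit: kbuild assembles this file's preprocessed output alongside each compiled file, after which inline asm can invoke a macro by name instead of expanding a large asm string at every call site. A sketch of the pattern with invented names (the real headers above each carry their own macros):

/* In a header's assembler-visible part (hypothetical macro): */
#ifdef __ASSEMBLY__
.macro EXAMPLE_UD2_MARKER
	ud2
.endm
#else
/* In C: gas already saw the definition via macros.S, so the name suffices. */
#define example_ud2() asm volatile("EXAMPLE_UD2_MARKER")
#endif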
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index f58336af095c..b052e883dd8c 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -201,6 +201,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 				goto overflow;
 #endif
 			break;
+		case R_X86_64_PC64:
+			if (*(u64 *)loc != 0)
+				goto invalid_relocation;
+			val -= (u64)loc;
+			*(u64 *)loc = val;
+			break;
 		default:
 			pr_err("%s: Unknown rela relocation: %llu\n",
 			       me->name, ELF64_R_TYPE(rel[i].r_info));
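
The new R_X86_64_PC64 case is the standard S + A - P computation carried out at full 64-bit width; by the time the switch runs, val already holds S + A (symbol value plus addend), so only the subtraction of the relocation site's address remains, and the *(u64 *)loc != 0 guard mirrors the neighboring cases' check against double-applied relocations. Standalone, the arithmetic is just:

#include <stdint.h>

/* R_X86_64_PC64: S + A - P, kept as a full 64-bit quantity. */
static uint64_t pc64_reloc(uint64_t sym_value,	/* S: resolved symbol address */
			   int64_t addend,	/* A: rel->r_addend */
			   uint64_t loc)	/* P: address being patched */
{
	return sym_value + (uint64_t)addend - loc;
}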
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index e6db475164ed..16c95cb90496 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -206,7 +206,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
 	}
 
 	if (!user_mode(regs)) {
-		if (fixup_exception(regs, trapnr))
+		if (fixup_exception(regs, trapnr, error_code, 0))
 			return 0;
 
 		tsk->thread.error_code = error_code;
@@ -551,11 +551,21 @@ do_general_protection(struct pt_regs *regs, long error_code)
 
 	tsk = current;
 	if (!user_mode(regs)) {
-		if (fixup_exception(regs, X86_TRAP_GP))
+		if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
 			return;
 
 		tsk->thread.error_code = error_code;
 		tsk->thread.trap_nr = X86_TRAP_GP;
+
+		/*
+		 * To be potentially processing a kprobe fault and to
+		 * trust the result from kprobe_running(), we have to
+		 * be non-preemptible.
+		 */
+		if (!preemptible() && kprobe_running() &&
+		    kprobe_fault_handler(regs, X86_TRAP_GP))
+			return;
+
 		if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
 			       X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
 			die("general protection fault", regs, error_code);
@@ -838,7 +848,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
 	cond_local_irq_enable(regs);
 
 	if (!user_mode(regs)) {
-		if (fixup_exception(regs, trapnr))
+		if (fixup_exception(regs, trapnr, error_code, 0))
 			return;
 
 		task->thread.error_code = error_code;
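
All three traps.c hunks follow the same convention: traps other than #PF carry no fault address, so they pass 0. The payoff is in the exception-table handlers, which can now branch on what actually happened; a hypothetical handler in the style of the arch/x86/mm/extable.c ones (signature inferred from the call sites above):

static bool ex_handler_example(const struct exception_table_entry *fixup,
			       struct pt_regs *regs, int trapnr,
			       unsigned long error_code,
			       unsigned long fault_addr)
{
	/* Only #PF supplies a meaningful fault address; other traps pass 0. */
	if (trapnr == X86_TRAP_PF)
		pr_warn("fixup at %lx for fault on address %lx (ec=%lx)\n",
			regs->ip, fault_addr, error_code);

	regs->ip = ex_fixup_addr(fixup);	/* resume at the fixup code */
	return true;
}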