about summary refs log tree commit diff stats
path: root/arch/arm64/kernel
diff options
context:
space:
mode:
authorDavid A. Long <dave.long@linaro.org>2016-09-12 14:21:27 -0400
committerWill Deacon <will.deacon@arm.com>2016-09-15 03:33:46 -0400
commit3e593f66754def77fa3433c595f941f1defe4af1 (patch)
treeb7720aa433293cdefb7bd457512a7c6a51e0e105 /arch/arm64/kernel
parente506236a7b8140d73b35fee80f7e38c794dd931d (diff)
arm64: Improve kprobes test for atomic sequence
Kprobes searches backwards a finite number of instructions to determine if there is an attempt to probe a load/store exclusive sequence. It stops when it hits the maximum number of instructions or a load or store exclusive. However, this means it can run up past the beginning of the function and start looking at literal constants. This has been shown to cause a false positive and blocks insertion of the probe. To fix this, further limit the backwards search to stop if it hits a symbol address from kallsyms. The presumption is that this is the entry point to this code (particularly for the common case of placing probes at the beginning of functions). This also improves efficiency by not searching code that is not part of the function. There may be some possibility that the label might not denote the entry path to the probed instruction, but the likelihood seems low, and this is just another example of how the kprobes user really needs to be careful about what they are doing. Acked-by: Masami Hiramatsu <mhiramat@kernel.org> Signed-off-by: David A. Long <dave.long@linaro.org> Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--arch/arm64/kernel/probes/decode-insn.c48
1 file changed, 23 insertions, 25 deletions
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
index 37e47a9d617e..d1731bf977ef 100644
--- a/arch/arm64/kernel/probes/decode-insn.c
+++ b/arch/arm64/kernel/probes/decode-insn.c
@@ -16,6 +16,7 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/kprobes.h> 17#include <linux/kprobes.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/kallsyms.h>
19#include <asm/kprobes.h> 20#include <asm/kprobes.h>
20#include <asm/insn.h> 21#include <asm/insn.h>
21#include <asm/sections.h> 22#include <asm/sections.h>
@@ -122,7 +123,7 @@ arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
122static bool __kprobes 123static bool __kprobes
123is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end) 124is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
124{ 125{
125 while (scan_start > scan_end) { 126 while (scan_start >= scan_end) {
126 /* 127 /*
127 * atomic region starts from exclusive load and ends with 128 * atomic region starts from exclusive load and ends with
128 * exclusive store. 129 * exclusive store.
@@ -142,33 +143,30 @@ arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
142{ 143{
143 enum kprobe_insn decoded; 144 enum kprobe_insn decoded;
144 kprobe_opcode_t insn = le32_to_cpu(*addr); 145 kprobe_opcode_t insn = le32_to_cpu(*addr);
145 kprobe_opcode_t *scan_start = addr - 1; 146 kprobe_opcode_t *scan_end = NULL;
146 kprobe_opcode_t *scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE; 147 unsigned long size = 0, offset = 0;
147#if defined(CONFIG_MODULES) && defined(MODULES_VADDR) 148
148 struct module *mod; 149 /*
149#endif 150 * If there's a symbol defined in front of and near enough to
150 151 * the probe address assume it is the entry point to this
151 if (addr >= (kprobe_opcode_t *)_text && 152 * code and use it to further limit how far back we search
152 scan_end < (kprobe_opcode_t *)_text) 153 * when determining if we're in an atomic sequence. If we could
153 scan_end = (kprobe_opcode_t *)_text; 154 * not find any symbol skip the atomic test altogether as we
154#if defined(CONFIG_MODULES) && defined(MODULES_VADDR) 155 * could otherwise end up searching irrelevant text/literals.
155 else { 156 * KPROBES depends on KALLSYMS so this last case should never
156 preempt_disable(); 157 * happen.
157 mod = __module_address((unsigned long)addr); 158 */
158 if (mod && within_module_init((unsigned long)addr, mod) && 159 if (kallsyms_lookup_size_offset((unsigned long) addr, &size, &offset)) {
159 !within_module_init((unsigned long)scan_end, mod)) 160 if (offset < (MAX_ATOMIC_CONTEXT_SIZE*sizeof(kprobe_opcode_t)))
160 scan_end = (kprobe_opcode_t *)mod->init_layout.base; 161 scan_end = addr - (offset / sizeof(kprobe_opcode_t));
161 else if (mod && within_module_core((unsigned long)addr, mod) && 162 else
162 !within_module_core((unsigned long)scan_end, mod)) 163 scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
163 scan_end = (kprobe_opcode_t *)mod->core_layout.base;
164 preempt_enable();
165 } 164 }
166#endif
167 decoded = arm_probe_decode_insn(insn, asi); 165 decoded = arm_probe_decode_insn(insn, asi);
168 166
169 if (decoded == INSN_REJECTED || 167 if (decoded != INSN_REJECTED && scan_end)
170 is_probed_address_atomic(scan_start, scan_end)) 168 if (is_probed_address_atomic(addr - 1, scan_end))
171 return INSN_REJECTED; 169 return INSN_REJECTED;
172 170
173 return decoded; 171 return decoded;
174} 172}