aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
author	Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>	2014-04-17 04:17:47 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-04-24 04:03:01 -0400
commit	7ec8a97a990da8e3ba87175a757731e17f74072e (patch)
tree	4d937ea40000427c5f469f93775b2a0a13732e17 /arch/x86/kernel
parent	ecd50f714c421c759354632dd00f70c718c95b10 (diff)
kprobes/x86: Allow probe on some kprobe preparation functions
There is no need to prohibit probing on the functions used in the preparation
phase. Those can safely be probed because they are not invoked from the
breakpoint/fault/debug handlers, so there is no chance of causing recursive
exceptions.

The following functions are now removed from the kprobes blacklist:

	can_boost
	can_probe
	can_optimize
	is_IF_modifier
	__copy_instruction
	copy_optimized_instructions
	arch_copy_kprobe
	arch_prepare_kprobe
	arch_arm_kprobe
	arch_disarm_kprobe
	arch_remove_kprobe
	arch_trampoline_kprobe
	arch_prepare_kprobe_ftrace
	arch_prepare_optimized_kprobe
	arch_check_optimized_kprobe
	arch_within_optimized_kprobe
	__arch_remove_optimized_kprobe
	arch_remove_optimized_kprobe
	arch_optimize_kprobes
	arch_unoptimize_kprobe

I tested those functions by putting kprobes on all instructions in the
functions with the bash script I sent to LKML. See:

	https://lkml.org/lkml/2014/3/27/33

Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Jonathan Lebon <jlebon@redhat.com>
Link: http://lkml.kernel.org/r/20140417081747.26341.36065.stgit@ltc230.yrl.intra.hitachi.co.jp
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/kprobes/core.c	20
-rw-r--r--	arch/x86/kernel/kprobes/ftrace.c	2
-rw-r--r--	arch/x86/kernel/kprobes/opt.c	24
3 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 9b80aec1ea1a..bd717137ae77 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -159,7 +159,7 @@ static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
159 * Returns non-zero if opcode is boostable. 159 * Returns non-zero if opcode is boostable.
160 * RIP relative instructions are adjusted at copying time in 64 bits mode 160 * RIP relative instructions are adjusted at copying time in 64 bits mode
161 */ 161 */
162int __kprobes can_boost(kprobe_opcode_t *opcodes) 162int can_boost(kprobe_opcode_t *opcodes)
163{ 163{
164 kprobe_opcode_t opcode; 164 kprobe_opcode_t opcode;
165 kprobe_opcode_t *orig_opcodes = opcodes; 165 kprobe_opcode_t *orig_opcodes = opcodes;
@@ -260,7 +260,7 @@ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long add
260} 260}
261 261
262/* Check if paddr is at an instruction boundary */ 262/* Check if paddr is at an instruction boundary */
263static int __kprobes can_probe(unsigned long paddr) 263static int can_probe(unsigned long paddr)
264{ 264{
265 unsigned long addr, __addr, offset = 0; 265 unsigned long addr, __addr, offset = 0;
266 struct insn insn; 266 struct insn insn;
@@ -299,7 +299,7 @@ static int __kprobes can_probe(unsigned long paddr)
299/* 299/*
300 * Returns non-zero if opcode modifies the interrupt flag. 300 * Returns non-zero if opcode modifies the interrupt flag.
301 */ 301 */
302static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) 302static int is_IF_modifier(kprobe_opcode_t *insn)
303{ 303{
304 /* Skip prefixes */ 304 /* Skip prefixes */
305 insn = skip_prefixes(insn); 305 insn = skip_prefixes(insn);
@@ -322,7 +322,7 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
322 * If not, return null. 322 * If not, return null.
323 * Only applicable to 64-bit x86. 323 * Only applicable to 64-bit x86.
324 */ 324 */
325int __kprobes __copy_instruction(u8 *dest, u8 *src) 325int __copy_instruction(u8 *dest, u8 *src)
326{ 326{
327 struct insn insn; 327 struct insn insn;
328 kprobe_opcode_t buf[MAX_INSN_SIZE]; 328 kprobe_opcode_t buf[MAX_INSN_SIZE];
@@ -365,7 +365,7 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
365 return insn.length; 365 return insn.length;
366} 366}
367 367
368static int __kprobes arch_copy_kprobe(struct kprobe *p) 368static int arch_copy_kprobe(struct kprobe *p)
369{ 369{
370 int ret; 370 int ret;
371 371
@@ -392,7 +392,7 @@ static int __kprobes arch_copy_kprobe(struct kprobe *p)
392 return 0; 392 return 0;
393} 393}
394 394
395int __kprobes arch_prepare_kprobe(struct kprobe *p) 395int arch_prepare_kprobe(struct kprobe *p)
396{ 396{
397 if (alternatives_text_reserved(p->addr, p->addr)) 397 if (alternatives_text_reserved(p->addr, p->addr))
398 return -EINVAL; 398 return -EINVAL;
@@ -407,17 +407,17 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
407 return arch_copy_kprobe(p); 407 return arch_copy_kprobe(p);
408} 408}
409 409
410void __kprobes arch_arm_kprobe(struct kprobe *p) 410void arch_arm_kprobe(struct kprobe *p)
411{ 411{
412 text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1); 412 text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
413} 413}
414 414
415void __kprobes arch_disarm_kprobe(struct kprobe *p) 415void arch_disarm_kprobe(struct kprobe *p)
416{ 416{
417 text_poke(p->addr, &p->opcode, 1); 417 text_poke(p->addr, &p->opcode, 1);
418} 418}
419 419
420void __kprobes arch_remove_kprobe(struct kprobe *p) 420void arch_remove_kprobe(struct kprobe *p)
421{ 421{
422 if (p->ainsn.insn) { 422 if (p->ainsn.insn) {
423 free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); 423 free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
@@ -1060,7 +1060,7 @@ int __init arch_init_kprobes(void)
1060 return 0; 1060 return 0;
1061} 1061}
1062 1062
1063int __kprobes arch_trampoline_kprobe(struct kprobe *p) 1063int arch_trampoline_kprobe(struct kprobe *p)
1064{ 1064{
1065 return 0; 1065 return 0;
1066} 1066}
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 23ef5c556f06..dcaa1310ccfd 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -85,7 +85,7 @@ end:
85 local_irq_restore(flags); 85 local_irq_restore(flags);
86} 86}
87 87
88int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p) 88int arch_prepare_kprobe_ftrace(struct kprobe *p)
89{ 89{
90 p->ainsn.insn = NULL; 90 p->ainsn.insn = NULL;
91 p->ainsn.boostable = -1; 91 p->ainsn.boostable = -1;
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 898160b42e43..fba7fb075e8a 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -77,7 +77,7 @@ found:
77} 77}
78 78
79/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */ 79/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
80static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) 80static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
81{ 81{
82#ifdef CONFIG_X86_64 82#ifdef CONFIG_X86_64
83 *addr++ = 0x48; 83 *addr++ = 0x48;
@@ -169,7 +169,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_
169 local_irq_restore(flags); 169 local_irq_restore(flags);
170} 170}
171 171
172static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) 172static int copy_optimized_instructions(u8 *dest, u8 *src)
173{ 173{
174 int len = 0, ret; 174 int len = 0, ret;
175 175
@@ -189,7 +189,7 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
189} 189}
190 190
191/* Check whether insn is indirect jump */ 191/* Check whether insn is indirect jump */
192static int __kprobes insn_is_indirect_jump(struct insn *insn) 192static int insn_is_indirect_jump(struct insn *insn)
193{ 193{
194 return ((insn->opcode.bytes[0] == 0xff && 194 return ((insn->opcode.bytes[0] == 0xff &&
195 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ 195 (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -224,7 +224,7 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
224} 224}
225 225
226/* Decode whole function to ensure any instructions don't jump into target */ 226/* Decode whole function to ensure any instructions don't jump into target */
227static int __kprobes can_optimize(unsigned long paddr) 227static int can_optimize(unsigned long paddr)
228{ 228{
229 unsigned long addr, size = 0, offset = 0; 229 unsigned long addr, size = 0, offset = 0;
230 struct insn insn; 230 struct insn insn;
@@ -275,7 +275,7 @@ static int __kprobes can_optimize(unsigned long paddr)
275} 275}
276 276
277/* Check optimized_kprobe can actually be optimized. */ 277/* Check optimized_kprobe can actually be optimized. */
278int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op) 278int arch_check_optimized_kprobe(struct optimized_kprobe *op)
279{ 279{
280 int i; 280 int i;
281 struct kprobe *p; 281 struct kprobe *p;
@@ -290,15 +290,15 @@ int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
290} 290}
291 291
292/* Check the addr is within the optimized instructions. */ 292/* Check the addr is within the optimized instructions. */
293int __kprobes 293int arch_within_optimized_kprobe(struct optimized_kprobe *op,
294arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr) 294 unsigned long addr)
295{ 295{
296 return ((unsigned long)op->kp.addr <= addr && 296 return ((unsigned long)op->kp.addr <= addr &&
297 (unsigned long)op->kp.addr + op->optinsn.size > addr); 297 (unsigned long)op->kp.addr + op->optinsn.size > addr);
298} 298}
299 299
300/* Free optimized instruction slot */ 300/* Free optimized instruction slot */
301static __kprobes 301static
302void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) 302void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
303{ 303{
304 if (op->optinsn.insn) { 304 if (op->optinsn.insn) {
@@ -308,7 +308,7 @@ void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
308 } 308 }
309} 309}
310 310
311void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) 311void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
312{ 312{
313 __arch_remove_optimized_kprobe(op, 1); 313 __arch_remove_optimized_kprobe(op, 1);
314} 314}
@@ -318,7 +318,7 @@ void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
318 * Target instructions MUST be relocatable (checked inside) 318 * Target instructions MUST be relocatable (checked inside)
319 * This is called when new aggr(opt)probe is allocated or reused. 319 * This is called when new aggr(opt)probe is allocated or reused.
320 */ 320 */
321int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) 321int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
322{ 322{
323 u8 *buf; 323 u8 *buf;
324 int ret; 324 int ret;
@@ -372,7 +372,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
372 * Replace breakpoints (int3) with relative jumps. 372 * Replace breakpoints (int3) with relative jumps.
373 * Caller must call with locking kprobe_mutex and text_mutex. 373 * Caller must call with locking kprobe_mutex and text_mutex.
374 */ 374 */
375void __kprobes arch_optimize_kprobes(struct list_head *oplist) 375void arch_optimize_kprobes(struct list_head *oplist)
376{ 376{
377 struct optimized_kprobe *op, *tmp; 377 struct optimized_kprobe *op, *tmp;
378 u8 insn_buf[RELATIVEJUMP_SIZE]; 378 u8 insn_buf[RELATIVEJUMP_SIZE];
@@ -398,7 +398,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist)
398} 398}
399 399
400/* Replace a relative jump with a breakpoint (int3). */ 400/* Replace a relative jump with a breakpoint (int3). */
401void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) 401void arch_unoptimize_kprobe(struct optimized_kprobe *op)
402{ 402{
403 u8 insn_buf[RELATIVEJUMP_SIZE]; 403 u8 insn_buf[RELATIVEJUMP_SIZE];
404 404