author     Oleg Nesterov <oleg@redhat.com>    2014-03-31 12:35:09 -0400
committer  Oleg Nesterov <oleg@redhat.com>    2014-04-17 15:58:17 -0400
commit     d20737c07a1063d681fe9fb86f3da369da1edab7 (patch)
tree       cdac2c07d45ab97a9238f8c324a93857bd4016fe /arch
parent     59078d4b96bb548f97d9fb429b929a289e4884d9 (diff)
uprobes/x86: Gather "riprel" functions together
Cosmetic. Move pre_xol_rip_insn() and handle_riprel_post_xol() up next to
the closely related handle_riprel_insn(). This makes the code simpler to
read and understand, and reduces the number of #ifdef blocks.
While at it, update the comment in handle_riprel_post_xol() as Jim
suggested.
TODO: rename them somehow to make the naming consistent.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Reviewed-by: Jim Keniston <jkenisto@us.ibm.com>
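
The structural win is the usual kernel idiom: keep all the 64-bit-only helpers inside one #ifdef CONFIG_X86_64 block and give the 32-bit build empty stubs under a single #else, so call sites never need their own #ifdef. A minimal sketch of that idiom, with illustrative names rather than this file's (the real stubs appear in the second hunk below):

```c
/* Illustrative names only; the point is the shape, not the contents. */
struct ctx { int n; };

#ifdef CONFIG_X86_64
static void fixup_pre(struct ctx *c)  { c->n += 1; /* real 64-bit work */ }
static void fixup_post(struct ctx *c) { c->n -= 1; /* real 64-bit work */ }
#else /* 32-bit: */
/* Empty stubs: one #else covers every helper, so callers stay #ifdef-free. */
static void fixup_pre(struct ctx *c)  { (void)c; }
static void fixup_post(struct ctx *c) { (void)c; }
#endif

int run(struct ctx *c)
{
	fixup_pre(c);   /* compiles on both configs; no-op on 32-bit */
	fixup_post(c);
	return c->n;
}
```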
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/uprobes.c  118
1 file changed, 53 insertions(+), 65 deletions(-)
```diff
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 963c121c0307..c52c30fa7871 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -313,6 +313,48 @@ handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
 	}
 }
 
+/*
+ * If we're emulating a rip-relative instruction, save the contents
+ * of the scratch register and store the target address in that register.
+ */
+static void
+pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
+				struct arch_uprobe_task *autask)
+{
+	if (auprobe->fixups & UPROBE_FIX_RIP_AX) {
+		autask->saved_scratch_register = regs->ax;
+		regs->ax = current->utask->vaddr;
+		regs->ax += auprobe->rip_rela_target_address;
+	} else if (auprobe->fixups & UPROBE_FIX_RIP_CX) {
+		autask->saved_scratch_register = regs->cx;
+		regs->cx = current->utask->vaddr;
+		regs->cx += auprobe->rip_rela_target_address;
+	}
+}
+
+static void
+handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
+{
+	if (auprobe->fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) {
+		struct arch_uprobe_task *autask;
+
+		autask = &current->utask->autask;
+		if (auprobe->fixups & UPROBE_FIX_RIP_AX)
+			regs->ax = autask->saved_scratch_register;
+		else
+			regs->cx = autask->saved_scratch_register;
+
+		/*
+		 * The original instruction includes a displacement, and so
+		 * is 4 bytes longer than what we've just single-stepped.
+		 * Caller may need to apply other fixups to handle stuff
+		 * like "jmpq *...(%rip)" and "callq *...(%rip)".
+		 */
+		if (correction)
+			*correction += 4;
+	}
+}
+
 static int validate_insn_64bits(struct arch_uprobe *auprobe, struct insn *insn)
 {
 	insn_init(insn, auprobe->insn, true);
```
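
For context on what the two helpers above are doing: handle_riprel_insn() has already rewritten the out-of-line (XOL) copy of a RIP-relative instruction to address through %rax or %rcx, squeezing out the 4-byte displacement (e.g. the 7-byte `mov 0x12345678(%rip),%rax` becomes the 3-byte `mov (%rax),%rax` in the XOL slot). pre_xol_rip_insn() then saves the chosen scratch register and loads it with the address the original %rip-relative operand would have computed; handle_riprel_post_xol() restores the register and bumps the length correction by the 4 bytes the copy lost. A minimal userspace sketch of that save/replace/restore flow, using simplified stand-in types rather than the kernel's pt_regs/arch_uprobe (AX-only case, for brevity):

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's pt_regs/arch_uprobe/autask state. */
struct regs   { uint64_t ax; };
struct uprobe {
	unsigned fix_rip_ax;       /* plays the role of UPROBE_FIX_RIP_AX */
	int64_t  rip_rela_target;  /* plays the role of rip_rela_target_address */
};

static uint64_t saved_scratch;     /* plays the role of saved_scratch_register */

/* Before single-stepping: save %rax, point it at the rip-relative target. */
static void pre_xol(struct uprobe *u, struct regs *r, uint64_t probed_vaddr)
{
	if (u->fix_rip_ax) {
		saved_scratch = r->ax;
		r->ax = probed_vaddr + u->rip_rela_target;
	}
}

/* After single-stepping: restore %rax and account for the lost displacement. */
static void post_xol(struct uprobe *u, struct regs *r, long *correction)
{
	if (u->fix_rip_ax) {
		r->ax = saved_scratch;
		if (correction)
			*correction += 4;  /* the XOL copy was 4 bytes shorter */
	}
}

int main(void)
{
	struct uprobe u = { .fix_rip_ax = 1, .rip_rela_target = 0x100 };
	struct regs r = { .ax = 0xdead };
	long correction = 0;

	pre_xol(&u, &r, 0x400000);
	printf("scratch ax = %#llx\n", (unsigned long long)r.ax);   /* 0x400100 */
	post_xol(&u, &r, &correction);
	printf("ax = %#llx, correction = %ld\n",
	       (unsigned long long)r.ax, correction);               /* 0xdead, 4 */
	return 0;
}
```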
```diff
@@ -339,9 +381,19 @@ static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	return validate_insn_64bits(auprobe, insn);
 }
 #else /* 32-bit: */
+/*
+ * No RIP-relative addressing on 32-bit
+ */
 static void handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn)
 {
-	/* No RIP-relative addressing on 32-bit */
+}
+static void pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
+				struct arch_uprobe_task *autask)
+{
+}
+static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs,
+				long *correction)
+{
 }
 
 static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn)
@@ -415,34 +467,6 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	return 0;
 }
 
-#ifdef CONFIG_X86_64
-/*
- * If we're emulating a rip-relative instruction, save the contents
- * of the scratch register and store the target address in that register.
- */
-static void
-pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
-				struct arch_uprobe_task *autask)
-{
-	if (auprobe->fixups & UPROBE_FIX_RIP_AX) {
-		autask->saved_scratch_register = regs->ax;
-		regs->ax = current->utask->vaddr;
-		regs->ax += auprobe->rip_rela_target_address;
-	} else if (auprobe->fixups & UPROBE_FIX_RIP_CX) {
-		autask->saved_scratch_register = regs->cx;
-		regs->cx = current->utask->vaddr;
-		regs->cx += auprobe->rip_rela_target_address;
-	}
-}
-#else
-static void
-pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs,
-				struct arch_uprobe_task *autask)
-{
-	/* No RIP-relative addressing on 32-bit */
-}
-#endif
-
 /*
  * arch_uprobe_pre_xol - prepare to execute out of line.
  * @auprobe: the probepoint information.
@@ -492,42 +516,6 @@ static int adjust_ret_addr(unsigned long sp, long correction)
 	return 0;
 }
 
-#ifdef CONFIG_X86_64
-static bool is_riprel_insn(struct arch_uprobe *auprobe)
-{
-	return ((auprobe->fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) != 0);
-}
-
-static void
-handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
-{
-	if (is_riprel_insn(auprobe)) {
-		struct arch_uprobe_task *autask;
-
-		autask = &current->utask->autask;
-		if (auprobe->fixups & UPROBE_FIX_RIP_AX)
-			regs->ax = autask->saved_scratch_register;
-		else
-			regs->cx = autask->saved_scratch_register;
-
-		/*
-		 * The original instruction includes a displacement, and so
-		 * is 4 bytes longer than what we've just single-stepped.
-		 * Fall through to handle stuff like "jmpq *...(%rip)" and
-		 * "callq *...(%rip)".
-		 */
-		if (correction)
-			*correction += 4;
-	}
-}
-#else
-static void
-handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction)
-{
-	/* No RIP-relative addressing on 32-bit */
-}
-#endif
-
 /*
  * If xol insn itself traps and generates a signal(Say,
  * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped
```