diff options
author | Masami Hiramatsu <mhiramat@redhat.com> | 2010-02-25 08:34:46 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2010-02-25 11:49:26 -0500 |
commit | c0f7ac3a9edde786bc129d37627953a8b8abefdf (patch) | |
tree | 8d6f2df2d1a08f2893327dd2a8b6e56525705456 /arch/x86/include/asm/kprobes.h | |
parent | 3d55cc8a058ee96291d6d45b1e35121b9920eca3 (diff) |
kprobes/x86: Support kprobes jump optimization on x86
Introduce x86 arch-specific optimization code, which supports
both x86-32 and x86-64.
This code also supports safety checking, which decodes the whole of
the function in which a probe is inserted, and checks the following
conditions before optimization:
- The optimized instructions which will be replaced by a jump instruction
don't straddle the function boundary.
- There is no indirect jump instruction, because it may jump into
the address range that is replaced by the jump operand.
- There is no jump/loop instruction which jumps into the address range
that is replaced by the jump operand.
- Don't optimize a kprobe if it is in a function into which fixup code
will jump.
This uses text_poke_multibyte() which doesn't support modifying
code on NMI/MCE handler. However, since kprobes itself doesn't
support NMI/MCE code probing, it's not a problem.
Changes in v9:
- Use *_text_reserved() for checking the probe can be optimized.
- Verify jump address range is in 2G range when preparing slot.
- Backup original code when switching optimized buffer, instead of
preparing buffer, because there can be int3 of other probes in
preparing phase.
- Check kprobe is disabled in arch_check_optimized_kprobe().
- Strictly check indirect jump opcodes (ff /4, ff /5).
Changes in v6:
- Split stop_machine-based jump patching code.
- Update comments and coding style.
Changes in v5:
- Introduce stop_machine-based jump replacing.
Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: systemtap <systemtap@sources.redhat.com>
Cc: DLE <dle-develop@lists.sourceforge.net>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Anders Kaseorg <andersk@ksplice.com>
Cc: Tim Abbott <tabbott@ksplice.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Mathieu Desnoyers <compudj@krystal.dyndns.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
LKML-Reference: <20100225133446.6725.78994.stgit@localhost6.localdomain6>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/include/asm/kprobes.h')
-rw-r--r-- | arch/x86/include/asm/kprobes.h | 29 |
1 files changed, 29 insertions, 0 deletions
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index eaec8ea7bf18..4ffa345a8ccb 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h | |||
@@ -33,6 +33,9 @@ struct kprobe; | |||
33 | typedef u8 kprobe_opcode_t; | 33 | typedef u8 kprobe_opcode_t; |
34 | #define BREAKPOINT_INSTRUCTION 0xcc | 34 | #define BREAKPOINT_INSTRUCTION 0xcc |
35 | #define RELATIVEJUMP_OPCODE 0xe9 | 35 | #define RELATIVEJUMP_OPCODE 0xe9 |
36 | #define RELATIVEJUMP_SIZE 5 | ||
37 | #define RELATIVECALL_OPCODE 0xe8 | ||
38 | #define RELATIVE_ADDR_SIZE 4 | ||
36 | #define MAX_INSN_SIZE 16 | 39 | #define MAX_INSN_SIZE 16 |
37 | #define MAX_STACK_SIZE 64 | 40 | #define MAX_STACK_SIZE 64 |
38 | #define MIN_STACK_SIZE(ADDR) \ | 41 | #define MIN_STACK_SIZE(ADDR) \ |
@@ -44,6 +47,17 @@ typedef u8 kprobe_opcode_t; | |||
44 | 47 | ||
45 | #define flush_insn_slot(p) do { } while (0) | 48 | #define flush_insn_slot(p) do { } while (0) |
46 | 49 | ||
50 | /* optinsn template addresses */ | ||
51 | extern kprobe_opcode_t optprobe_template_entry; | ||
52 | extern kprobe_opcode_t optprobe_template_val; | ||
53 | extern kprobe_opcode_t optprobe_template_call; | ||
54 | extern kprobe_opcode_t optprobe_template_end; | ||
55 | #define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE) | ||
56 | #define MAX_OPTINSN_SIZE \ | ||
57 | (((unsigned long)&optprobe_template_end - \ | ||
58 | (unsigned long)&optprobe_template_entry) + \ | ||
59 | MAX_OPTIMIZED_LENGTH + RELATIVEJUMP_SIZE) | ||
60 | |||
47 | extern const int kretprobe_blacklist_size; | 61 | extern const int kretprobe_blacklist_size; |
48 | 62 | ||
49 | void arch_remove_kprobe(struct kprobe *p); | 63 | void arch_remove_kprobe(struct kprobe *p); |
@@ -64,6 +78,21 @@ struct arch_specific_insn { | |||
64 | int boostable; | 78 | int boostable; |
65 | }; | 79 | }; |
66 | 80 | ||
81 | struct arch_optimized_insn { | ||
82 | /* copy of the original instructions */ | ||
83 | kprobe_opcode_t copied_insn[RELATIVE_ADDR_SIZE]; | ||
84 | /* detour code buffer */ | ||
85 | kprobe_opcode_t *insn; | ||
86 | /* the size of instructions copied to detour code buffer */ | ||
87 | size_t size; | ||
88 | }; | ||
89 | |||
90 | /* Return true (!0) if optinsn is prepared for optimization. */ | ||
91 | static inline int arch_prepared_optinsn(struct arch_optimized_insn *optinsn) | ||
92 | { | ||
93 | return optinsn->size; | ||
94 | } | ||
95 | |||
67 | struct prev_kprobe { | 96 | struct prev_kprobe { |
68 | struct kprobe *kp; | 97 | struct kprobe *kp; |
69 | unsigned long status; | 98 | unsigned long status; |