aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel/kprobes_64.c
diff options
context:
space:
mode:
authorMasami Hiramatsu <mhiramat@redhat.com>2008-01-30 07:31:21 -0500
committerIngo Molnar <mingo@elte.hu>2008-01-30 07:31:21 -0500
commit8533bbe9f87b01f49ff951f665ea1988252fa3c2 (patch)
tree5286298af37057c1086405a96e6dce0b0df1fb64 /arch/x86/kernel/kprobes_64.c
parentda07ab0375897bb9e108b28129df140ecd3ee94e (diff)
x86: prepare kprobes code for x86 unification
This patch cleans up the kprobes code on x86 for unification. This patch is based on Arjan's previous work. - Remove spurious whitespace changes - Add harmless includes - Make the 32/64 files more identical - Generalize structure fields' and local variable names. - Wrap accesses to stack addresses in macros. - Modify the bitmap-making macro. - Merge the fixup code into is_riprel() and change its name to fix_riprel(). - Set MAX_INSN_SIZE to 16 on both architectures. - Use u32 for bitmaps on both architectures. - Clarify some comments. Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com> Signed-off-by: Arjan van de Ven <arjan@infradead.org> Signed-off-by: Jim Keniston <jkenisto@us.ibm.com> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/kprobes_64.c')
-rw-r--r--arch/x86/kernel/kprobes_64.c409
1 files changed, 212 insertions, 197 deletions
diff --git a/arch/x86/kernel/kprobes_64.c b/arch/x86/kernel/kprobes_64.c
index bc93b1dd9a01..2d7763749b1b 100644
--- a/arch/x86/kernel/kprobes_64.c
+++ b/arch/x86/kernel/kprobes_64.c
@@ -40,16 +40,97 @@
40#include <linux/module.h> 40#include <linux/module.h>
41#include <linux/kdebug.h> 41#include <linux/kdebug.h>
42 42
43#include <asm/cacheflush.h>
44#include <asm/desc.h>
43#include <asm/pgtable.h> 45#include <asm/pgtable.h>
44#include <asm/uaccess.h> 46#include <asm/uaccess.h>
45#include <asm/alternative.h> 47#include <asm/alternative.h>
46 48
47void jprobe_return_end(void); 49void jprobe_return_end(void);
48static void __kprobes arch_copy_kprobe(struct kprobe *p);
49 50
50DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; 51DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
51DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); 52DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
52 53
54#define stack_addr(regs) ((unsigned long *)regs->sp)
55
56#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
57 (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
58 (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
59 (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
60 (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
61 << (row % 32))
62 /*
63 * Undefined/reserved opcodes, conditional jump, Opcode Extension
64 * Groups, and some special opcodes can not boost.
65 */
66static const u32 twobyte_is_boostable[256 / 32] = {
67 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
68 /* ---------------------------------------------- */
69 W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
70 W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
71 W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
72 W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
73 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
74 W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
75 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
76 W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
77 W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
78 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
79 W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
80 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) , /* b0 */
81 W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
82 W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) , /* d0 */
83 W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
84 W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0) /* f0 */
85 /* ----------------------------------------------- */
86 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
87};
88static const u32 onebyte_has_modrm[256 / 32] = {
89 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
90 /* ----------------------------------------------- */
91 W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 00 */
92 W(0x10, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 10 */
93 W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 20 */
94 W(0x30, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) , /* 30 */
95 W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
96 W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 50 */
97 W(0x60, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0) | /* 60 */
98 W(0x70, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 70 */
99 W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
100 W(0x90, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 90 */
101 W(0xa0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* a0 */
102 W(0xb0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* b0 */
103 W(0xc0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* c0 */
104 W(0xd0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
105 W(0xe0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* e0 */
106 W(0xf0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) /* f0 */
107 /* ----------------------------------------------- */
108 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
109};
110static const u32 twobyte_has_modrm[256 / 32] = {
111 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
112 /* ----------------------------------------------- */
113 W(0x00, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1) | /* 0f */
114 W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0) , /* 1f */
115 W(0x20, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 2f */
116 W(0x30, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 3f */
117 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 4f */
118 W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 5f */
119 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 6f */
120 W(0x70, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1) , /* 7f */
121 W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 8f */
122 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 9f */
123 W(0xa0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1) | /* af */
124 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* bf */
125 W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0) | /* cf */
126 W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* df */
127 W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* ef */
128 W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* ff */
129 /* ----------------------------------------------- */
130 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
131};
132#undef W
133
53struct kretprobe_blackpoint kretprobe_blacklist[] = { 134struct kretprobe_blackpoint kretprobe_blacklist[] = {
54 {"__switch_to", }, /* This function switches only current task, but 135 {"__switch_to", }, /* This function switches only current task, but
55 doesn't switch kernel stack.*/ 136 doesn't switch kernel stack.*/
@@ -70,44 +151,11 @@ static __always_inline void set_jmp_op(void *from, void *to)
70} 151}
71 152
72/* 153/*
73 * returns non-zero if opcode is boostable 154 * returns non-zero if opcode is boostable.
74 * RIP relative instructions are adjusted at copying time 155 * RIP relative instructions are adjusted at copying time
75 */ 156 */
76static __always_inline int can_boost(kprobe_opcode_t *opcodes) 157static __always_inline int can_boost(kprobe_opcode_t *opcodes)
77{ 158{
78#define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf)\
79 (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
80 (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
81 (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
82 (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
83 << (row % 64))
84 /*
85 * Undefined/reserved opcodes, conditional jump, Opcode Extension
86 * Groups, and some special opcodes can not boost.
87 */
88 static const unsigned long twobyte_is_boostable[256 / 64] = {
89 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
90 /* ---------------------------------------------- */
91 W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0)|/* 00 */
92 W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)|/* 10 */
93 W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)|/* 20 */
94 W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),/* 30 */
95 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)|/* 40 */
96 W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)|/* 50 */
97 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1)|/* 60 */
98 W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1),/* 70 */
99 W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)|/* 80 */
100 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)|/* 90 */
101 W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1)|/* a0 */
102 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1),/* b0 */
103 W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1)|/* c0 */
104 W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1)|/* d0 */
105 W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1)|/* e0 */
106 W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0) /* f0 */
107 /* ----------------------------------------------- */
108 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
109 };
110#undef W
111 kprobe_opcode_t opcode; 159 kprobe_opcode_t opcode;
112 kprobe_opcode_t *orig_opcodes = opcodes; 160 kprobe_opcode_t *orig_opcodes = opcodes;
113 161
@@ -120,7 +168,8 @@ retry:
120 if (opcode == 0x0f) { 168 if (opcode == 0x0f) {
121 if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1) 169 if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
122 return 0; 170 return 0;
123 return test_bit(*opcodes, twobyte_is_boostable); 171 return test_bit(*opcodes,
172 (unsigned long *)twobyte_is_boostable);
124 } 173 }
125 174
126 switch (opcode & 0xf0) { 175 switch (opcode & 0xf0) {
@@ -169,80 +218,25 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
169 return 1; 218 return 1;
170 } 219 }
171 220
172 if (*insn >= 0x40 && *insn <= 0x4f && *++insn == 0xcf) 221 /*
173 return 1; 222 * on 64 bit x86, 0x40-0x4f are prefixes so we need to look
174 return 0; 223 * at the next byte instead.. but of course not recurse infinitely
175} 224 */
176 225 if (*insn >= 0x40 && *insn <= 0x4f)
177int __kprobes arch_prepare_kprobe(struct kprobe *p) 226 return is_IF_modifier(++insn);
178{
179 /* insn: must be on special executable page on x86_64. */
180 p->ainsn.insn = get_insn_slot();
181 if (!p->ainsn.insn) {
182 return -ENOMEM;
183 }
184 arch_copy_kprobe(p);
185 return 0; 227 return 0;
186} 228}
187 229
188/* 230/*
189 * Determine if the instruction uses the %rip-relative addressing mode. 231 * Adjust the displacement if the instruction uses the %rip-relative
232 * addressing mode.
190 * If it does, Return the address of the 32-bit displacement word. 233 * If it does, Return the address of the 32-bit displacement word.
191 * If not, return null. 234 * If not, return null.
192 */ 235 */
193static s32 __kprobes *is_riprel(u8 *insn) 236static void __kprobes fix_riprel(struct kprobe *p)
194{ 237{
195#define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf) \ 238 u8 *insn = p->ainsn.insn;
196 (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \ 239 s64 disp;
197 (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
198 (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
199 (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
200 << (row % 64))
201 static const u64 onebyte_has_modrm[256 / 64] = {
202 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
203 /* ------------------------------- */
204 W(0x00, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 00 */
205 W(0x10, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 10 */
206 W(0x20, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 20 */
207 W(0x30, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0), /* 30 */
208 W(0x40, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 40 */
209 W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 50 */
210 W(0x60, 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0)| /* 60 */
211 W(0x70, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 70 */
212 W(0x80, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 80 */
213 W(0x90, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 90 */
214 W(0xa0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* a0 */
215 W(0xb0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* b0 */
216 W(0xc0, 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0)| /* c0 */
217 W(0xd0, 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1)| /* d0 */
218 W(0xe0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* e0 */
219 W(0xf0, 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1) /* f0 */
220 /* ------------------------------- */
221 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
222 };
223 static const u64 twobyte_has_modrm[256 / 64] = {
224 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
225 /* ------------------------------- */
226 W(0x00, 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1)| /* 0f */
227 W(0x10, 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0)| /* 1f */
228 W(0x20, 1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1)| /* 2f */
229 W(0x30, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 3f */
230 W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 4f */
231 W(0x50, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 5f */
232 W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 6f */
233 W(0x70, 1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1), /* 7f */
234 W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 8f */
235 W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 9f */
236 W(0xa0, 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1)| /* af */
237 W(0xb0, 1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1), /* bf */
238 W(0xc0, 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0)| /* cf */
239 W(0xd0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* df */
240 W(0xe0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* ef */
241 W(0xf0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0) /* ff */
242 /* ------------------------------- */
243 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
244 };
245#undef W
246 int need_modrm; 240 int need_modrm;
247 241
248 /* Skip legacy instruction prefixes. */ 242 /* Skip legacy instruction prefixes. */
@@ -271,54 +265,60 @@ static s32 __kprobes *is_riprel(u8 *insn)
271 265
272 if (*insn == 0x0f) { /* Two-byte opcode. */ 266 if (*insn == 0x0f) { /* Two-byte opcode. */
273 ++insn; 267 ++insn;
274 need_modrm = test_bit(*insn, twobyte_has_modrm); 268 need_modrm = test_bit(*insn,
275 } else { /* One-byte opcode. */ 269 (unsigned long *)twobyte_has_modrm);
276 need_modrm = test_bit(*insn, onebyte_has_modrm); 270 } else /* One-byte opcode. */
277 } 271 need_modrm = test_bit(*insn,
272 (unsigned long *)onebyte_has_modrm);
278 273
279 if (need_modrm) { 274 if (need_modrm) {
280 u8 modrm = *++insn; 275 u8 modrm = *++insn;
281 if ((modrm & 0xc7) == 0x05) { /* %rip+disp32 addressing mode */ 276 if ((modrm & 0xc7) == 0x05) { /* %rip+disp32 addressing mode */
282 /* Displacement follows ModRM byte. */ 277 /* Displacement follows ModRM byte. */
283 return (s32 *) ++insn; 278 ++insn;
279 /*
280 * The copied instruction uses the %rip-relative
281 * addressing mode. Adjust the displacement for the
282 * difference between the original location of this
283 * instruction and the location of the copy that will
284 * actually be run. The tricky bit here is making sure
285 * that the sign extension happens correctly in this
286 * calculation, since we need a signed 32-bit result to
287 * be sign-extended to 64 bits when it's added to the
288 * %rip value and yield the same 64-bit result that the
289 * sign-extension of the original signed 32-bit
290 * displacement would have given.
291 */
292 disp = (u8 *) p->addr + *((s32 *) insn) -
293 (u8 *) p->ainsn.insn;
294 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
295 *(s32 *)insn = (s32) disp;
284 } 296 }
285 } 297 }
286
287 /* No %rip-relative addressing mode here. */
288 return NULL;
289} 298}
290 299
291static void __kprobes arch_copy_kprobe(struct kprobe *p) 300static void __kprobes arch_copy_kprobe(struct kprobe *p)
292{ 301{
293 s32 *ripdisp; 302 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
294 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE); 303 fix_riprel(p);
295 ripdisp = is_riprel(p->ainsn.insn); 304 if (can_boost(p->addr))
296 if (ripdisp) {
297 /*
298 * The copied instruction uses the %rip-relative
299 * addressing mode. Adjust the displacement for the
300 * difference between the original location of this
301 * instruction and the location of the copy that will
302 * actually be run. The tricky bit here is making sure
303 * that the sign extension happens correctly in this
304 * calculation, since we need a signed 32-bit result to
305 * be sign-extended to 64 bits when it's added to the
306 * %rip value and yield the same 64-bit result that the
307 * sign-extension of the original signed 32-bit
308 * displacement would have given.
309 */
310 s64 disp = (u8 *) p->addr + *ripdisp - (u8 *) p->ainsn.insn;
311 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
312 *ripdisp = disp;
313 }
314 if (can_boost(p->addr)) {
315 p->ainsn.boostable = 0; 305 p->ainsn.boostable = 0;
316 } else { 306 else
317 p->ainsn.boostable = -1; 307 p->ainsn.boostable = -1;
318 } 308
319 p->opcode = *p->addr; 309 p->opcode = *p->addr;
320} 310}
321 311
312int __kprobes arch_prepare_kprobe(struct kprobe *p)
313{
314 /* insn: must be on special executable page on x86. */
315 p->ainsn.insn = get_insn_slot();
316 if (!p->ainsn.insn)
317 return -ENOMEM;
318 arch_copy_kprobe(p);
319 return 0;
320}
321
322void __kprobes arch_arm_kprobe(struct kprobe *p) 322void __kprobes arch_arm_kprobe(struct kprobe *p)
323{ 323{
324 text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1); 324 text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
@@ -340,26 +340,26 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
340{ 340{
341 kcb->prev_kprobe.kp = kprobe_running(); 341 kcb->prev_kprobe.kp = kprobe_running();
342 kcb->prev_kprobe.status = kcb->kprobe_status; 342 kcb->prev_kprobe.status = kcb->kprobe_status;
343 kcb->prev_kprobe.old_rflags = kcb->kprobe_old_rflags; 343 kcb->prev_kprobe.old_flags = kcb->kprobe_old_flags;
344 kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags; 344 kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags;
345} 345}
346 346
347static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) 347static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
348{ 348{
349 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; 349 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
350 kcb->kprobe_status = kcb->prev_kprobe.status; 350 kcb->kprobe_status = kcb->prev_kprobe.status;
351 kcb->kprobe_old_rflags = kcb->prev_kprobe.old_rflags; 351 kcb->kprobe_old_flags = kcb->prev_kprobe.old_flags;
352 kcb->kprobe_saved_rflags = kcb->prev_kprobe.saved_rflags; 352 kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags;
353} 353}
354 354
355static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, 355static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
356 struct kprobe_ctlblk *kcb) 356 struct kprobe_ctlblk *kcb)
357{ 357{
358 __get_cpu_var(current_kprobe) = p; 358 __get_cpu_var(current_kprobe) = p;
359 kcb->kprobe_saved_rflags = kcb->kprobe_old_rflags 359 kcb->kprobe_saved_flags = kcb->kprobe_old_flags
360 = (regs->flags & (TF_MASK | IF_MASK)); 360 = (regs->flags & (TF_MASK | IF_MASK));
361 if (is_IF_modifier(p->ainsn.insn)) 361 if (is_IF_modifier(p->ainsn.insn))
362 kcb->kprobe_saved_rflags &= ~IF_MASK; 362 kcb->kprobe_saved_flags &= ~IF_MASK;
363} 363}
364 364
365static __always_inline void clear_btf(void) 365static __always_inline void clear_btf(void)
@@ -390,20 +390,27 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
390void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 390void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
391 struct pt_regs *regs) 391 struct pt_regs *regs)
392{ 392{
393 unsigned long *sara = (unsigned long *)regs->sp; 393 unsigned long *sara = stack_addr(regs);
394 394
395 ri->ret_addr = (kprobe_opcode_t *) *sara; 395 ri->ret_addr = (kprobe_opcode_t *) *sara;
396
396 /* Replace the return addr with trampoline addr */ 397 /* Replace the return addr with trampoline addr */
397 *sara = (unsigned long) &kretprobe_trampoline; 398 *sara = (unsigned long) &kretprobe_trampoline;
398} 399}
399 400
400int __kprobes kprobe_handler(struct pt_regs *regs) 401/*
402 * Interrupts are disabled on entry as trap3 is an interrupt gate and they
403 * remain disabled thorough out this function.
404 */
405static int __kprobes kprobe_handler(struct pt_regs *regs)
401{ 406{
402 struct kprobe *p; 407 struct kprobe *p;
403 int ret = 0; 408 int ret = 0;
404 kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); 409 kprobe_opcode_t *addr;
405 struct kprobe_ctlblk *kcb; 410 struct kprobe_ctlblk *kcb;
406 411
412 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
413
407 /* 414 /*
408 * We don't want to be preempted for the entire 415 * We don't want to be preempted for the entire
409 * duration of kprobe processing 416 * duration of kprobe processing
@@ -418,7 +425,7 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
418 if (kcb->kprobe_status == KPROBE_HIT_SS && 425 if (kcb->kprobe_status == KPROBE_HIT_SS &&
419 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) { 426 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
420 regs->flags &= ~TF_MASK; 427 regs->flags &= ~TF_MASK;
421 regs->flags |= kcb->kprobe_saved_rflags; 428 regs->flags |= kcb->kprobe_saved_flags;
422 goto no_kprobe; 429 goto no_kprobe;
423 } else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) { 430 } else if (kcb->kprobe_status == KPROBE_HIT_SSDONE) {
424 /* TODO: Provide re-entrancy from 431 /* TODO: Provide re-entrancy from
@@ -429,22 +436,20 @@ int __kprobes kprobe_handler(struct pt_regs *regs)
429 arch_disarm_kprobe(p); 436 arch_disarm_kprobe(p);
430 regs->ip = (unsigned long)p->addr; 437 regs->ip = (unsigned long)p->addr;
431 reset_current_kprobe(); 438 reset_current_kprobe();
432 ret = 1;
433 } else {
434 /* We have reentered the kprobe_handler(), since
435 * another probe was hit while within the
436 * handler. We here save the original kprobe
437 * variables and just single step on instruction
438 * of the new probe without calling any user
439 * handlers.
440 */
441 save_previous_kprobe(kcb);
442 set_current_kprobe(p, regs, kcb);
443 kprobes_inc_nmissed_count(p);
444 prepare_singlestep(p, regs);
445 kcb->kprobe_status = KPROBE_REENTER;
446 return 1; 439 return 1;
447 } 440 }
441 /* We have reentered the kprobe_handler(), since
442 * another probe was hit while within the handler.
443 * We here save the original kprobes variables and
444 * just single step on the instruction of the new probe
445 * without calling any user handlers.
446 */
447 save_previous_kprobe(kcb);
448 set_current_kprobe(p, regs, kcb);
449 kprobes_inc_nmissed_count(p);
450 prepare_singlestep(p, regs);
451 kcb->kprobe_status = KPROBE_REENTER;
452 return 1;
448 } else { 453 } else {
449 if (*addr != BREAKPOINT_INSTRUCTION) { 454 if (*addr != BREAKPOINT_INSTRUCTION) {
450 /* The breakpoint instruction was removed by 455 /* The breakpoint instruction was removed by
@@ -578,23 +583,23 @@ fastcall void * __kprobes trampoline_handler(struct pt_regs *regs)
578 INIT_HLIST_HEAD(&empty_rp); 583 INIT_HLIST_HEAD(&empty_rp);
579 spin_lock_irqsave(&kretprobe_lock, flags); 584 spin_lock_irqsave(&kretprobe_lock, flags);
580 head = kretprobe_inst_table_head(current); 585 head = kretprobe_inst_table_head(current);
581 /* fixup rt_regs */ 586 /* fixup registers */
582 regs->cs = __KERNEL_CS; 587 regs->cs = __KERNEL_CS;
583 regs->ip = trampoline_address; 588 regs->ip = trampoline_address;
584 regs->orig_ax = 0xffffffffffffffff; 589 regs->orig_ax = ~0UL;
585 590
586 /* 591 /*
587 * It is possible to have multiple instances associated with a given 592 * It is possible to have multiple instances associated with a given
588 * task either because an multiple functions in the call path 593 * task either because multiple functions in the call path have
589 * have a return probe installed on them, and/or more then one return 594 * return probes installed on them, and/or more then one
590 * return probe was registered for a target function. 595 * return probe was registered for a target function.
591 * 596 *
592 * We can handle this because: 597 * We can handle this because:
593 * - instances are always inserted at the head of the list 598 * - instances are always pushed into the head of the list
594 * - when multiple return probes are registered for the same 599 * - when multiple return probes are registered for the same
595 * function, the first instance's ret_addr will point to the 600 * function, the (chronologically) first instance's ret_addr
596 * real return address, and all the rest will point to 601 * will be the real return address, and all the rest will
597 * kretprobe_trampoline 602 * point to kretprobe_trampoline.
598 */ 603 */
599 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { 604 hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
600 if (ri->task != current) 605 if (ri->task != current)
@@ -661,9 +666,9 @@ fastcall void * __kprobes trampoline_handler(struct pt_regs *regs)
661static void __kprobes resume_execution(struct kprobe *p, 666static void __kprobes resume_execution(struct kprobe *p,
662 struct pt_regs *regs, struct kprobe_ctlblk *kcb) 667 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
663{ 668{
664 unsigned long *tos = (unsigned long *)regs->sp; 669 unsigned long *tos = stack_addr(regs);
665 unsigned long copy_rip = (unsigned long)p->ainsn.insn; 670 unsigned long copy_ip = (unsigned long)p->ainsn.insn;
666 unsigned long orig_rip = (unsigned long)p->addr; 671 unsigned long orig_ip = (unsigned long)p->addr;
667 kprobe_opcode_t *insn = p->ainsn.insn; 672 kprobe_opcode_t *insn = p->ainsn.insn;
668 673
669 /*skip the REX prefix*/ 674 /*skip the REX prefix*/
@@ -674,7 +679,7 @@ static void __kprobes resume_execution(struct kprobe *p,
674 switch (*insn) { 679 switch (*insn) {
675 case 0x9c: /* pushfl */ 680 case 0x9c: /* pushfl */
676 *tos &= ~(TF_MASK | IF_MASK); 681 *tos &= ~(TF_MASK | IF_MASK);
677 *tos |= kcb->kprobe_old_rflags; 682 *tos |= kcb->kprobe_old_flags;
678 break; 683 break;
679 case 0xc2: /* iret/ret/lret */ 684 case 0xc2: /* iret/ret/lret */
680 case 0xc3: 685 case 0xc3:
@@ -686,18 +691,23 @@ static void __kprobes resume_execution(struct kprobe *p,
686 p->ainsn.boostable = 1; 691 p->ainsn.boostable = 1;
687 goto no_change; 692 goto no_change;
688 case 0xe8: /* call relative - Fix return addr */ 693 case 0xe8: /* call relative - Fix return addr */
689 *tos = orig_rip + (*tos - copy_rip); 694 *tos = orig_ip + (*tos - copy_ip);
690 break; 695 break;
691 case 0xff: 696 case 0xff:
692 if ((insn[1] & 0x30) == 0x10) { 697 if ((insn[1] & 0x30) == 0x10) {
693 /* call absolute, indirect */ 698 /*
694 /* Fix return addr; ip is correct. */ 699 * call absolute, indirect
695 /* not boostable */ 700 * Fix return addr; ip is correct.
696 *tos = orig_rip + (*tos - copy_rip); 701 * But this is not boostable
702 */
703 *tos = orig_ip + (*tos - copy_ip);
697 goto no_change; 704 goto no_change;
698 } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */ 705 } else if (((insn[1] & 0x31) == 0x20) ||
699 ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */ 706 ((insn[1] & 0x31) == 0x21)) {
700 /* ip is correct. And this is boostable */ 707 /*
708 * jmp near and far, absolute indirect
709 * ip is correct. And this is boostable
710 */
701 p->ainsn.boostable = 1; 711 p->ainsn.boostable = 1;
702 goto no_change; 712 goto no_change;
703 } 713 }
@@ -706,21 +716,21 @@ static void __kprobes resume_execution(struct kprobe *p,
706 } 716 }
707 717
708 if (p->ainsn.boostable == 0) { 718 if (p->ainsn.boostable == 0) {
709 if ((regs->ip > copy_rip) && 719 if ((regs->ip > copy_ip) &&
710 (regs->ip - copy_rip) + 5 < MAX_INSN_SIZE) { 720 (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) {
711 /* 721 /*
712 * These instructions can be executed directly if it 722 * These instructions can be executed directly if it
713 * jumps back to correct address. 723 * jumps back to correct address.
714 */ 724 */
715 set_jmp_op((void *)regs->ip, 725 set_jmp_op((void *)regs->ip,
716 (void *)orig_rip + (regs->ip - copy_rip)); 726 (void *)orig_ip + (regs->ip - copy_ip));
717 p->ainsn.boostable = 1; 727 p->ainsn.boostable = 1;
718 } else { 728 } else {
719 p->ainsn.boostable = -1; 729 p->ainsn.boostable = -1;
720 } 730 }
721 } 731 }
722 732
723 regs->ip = orig_rip + (regs->ip - copy_rip); 733 regs->ip += orig_ip - copy_ip;
724 734
725no_change: 735no_change:
726 restore_btf(); 736 restore_btf();
@@ -728,7 +738,11 @@ no_change:
728 return; 738 return;
729} 739}
730 740
731int __kprobes post_kprobe_handler(struct pt_regs *regs) 741/*
742 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
743 * remain disabled thoroughout this function.
744 */
745static int __kprobes post_kprobe_handler(struct pt_regs *regs)
732{ 746{
733 struct kprobe *cur = kprobe_running(); 747 struct kprobe *cur = kprobe_running();
734 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 748 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -742,10 +756,10 @@ int __kprobes post_kprobe_handler(struct pt_regs *regs)
742 } 756 }
743 757
744 resume_execution(cur, regs, kcb); 758 resume_execution(cur, regs, kcb);
745 regs->flags |= kcb->kprobe_saved_rflags; 759 regs->flags |= kcb->kprobe_saved_flags;
746 trace_hardirqs_fixup_flags(regs->flags); 760 trace_hardirqs_fixup_flags(regs->flags);
747 761
748 /* Restore the original saved kprobes variables and continue. */ 762 /* Restore back the original saved kprobes variables and continue. */
749 if (kcb->kprobe_status == KPROBE_REENTER) { 763 if (kcb->kprobe_status == KPROBE_REENTER) {
750 restore_previous_kprobe(kcb); 764 restore_previous_kprobe(kcb);
751 goto out; 765 goto out;
@@ -782,7 +796,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
782 * normal page fault. 796 * normal page fault.
783 */ 797 */
784 regs->ip = (unsigned long)cur->addr; 798 regs->ip = (unsigned long)cur->addr;
785 regs->flags |= kcb->kprobe_old_rflags; 799 regs->flags |= kcb->kprobe_old_flags;
786 if (kcb->kprobe_status == KPROBE_REENTER) 800 if (kcb->kprobe_status == KPROBE_REENTER)
787 restore_previous_kprobe(kcb); 801 restore_previous_kprobe(kcb);
788 else 802 else
@@ -793,7 +807,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
793 case KPROBE_HIT_SSDONE: 807 case KPROBE_HIT_SSDONE:
794 /* 808 /*
795 * We increment the nmissed count for accounting, 809 * We increment the nmissed count for accounting,
796 * we can also use npre/npostfault count for accouting 810 * we can also use npre/npostfault count for accounting
797 * these specific fault cases. 811 * these specific fault cases.
798 */ 812 */
799 kprobes_inc_nmissed_count(cur); 813 kprobes_inc_nmissed_count(cur);
@@ -819,7 +833,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
819 } 833 }
820 834
821 /* 835 /*
822 * fixup() could not handle it, 836 * fixup routine could not handle it,
823 * Let do_page_fault() fix it. 837 * Let do_page_fault() fix it.
824 */ 838 */
825 break; 839 break;
@@ -838,7 +852,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
838 struct die_args *args = (struct die_args *)data; 852 struct die_args *args = (struct die_args *)data;
839 int ret = NOTIFY_DONE; 853 int ret = NOTIFY_DONE;
840 854
841 if (args->regs && user_mode(args->regs)) 855 if (args->regs && user_mode_vm(args->regs))
842 return ret; 856 return ret;
843 857
844 switch (val) { 858 switch (val) {
@@ -871,8 +885,9 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
871 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 885 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
872 886
873 kcb->jprobe_saved_regs = *regs; 887 kcb->jprobe_saved_regs = *regs;
874 kcb->jprobe_saved_rsp = (long *) regs->sp; 888 kcb->jprobe_saved_sp = stack_addr(regs);
875 addr = (unsigned long)(kcb->jprobe_saved_rsp); 889 addr = (unsigned long)(kcb->jprobe_saved_sp);
890
876 /* 891 /*
877 * As Linus pointed out, gcc assumes that the callee 892 * As Linus pointed out, gcc assumes that the callee
878 * owns the argument space and could overwrite it, e.g. 893 * owns the argument space and could overwrite it, e.g.
@@ -897,21 +912,20 @@ void __kprobes jprobe_return(void)
897 " .globl jprobe_return_end \n" 912 " .globl jprobe_return_end \n"
898 " jprobe_return_end: \n" 913 " jprobe_return_end: \n"
899 " nop \n"::"b" 914 " nop \n"::"b"
900 (kcb->jprobe_saved_rsp):"memory"); 915 (kcb->jprobe_saved_sp):"memory");
901} 916}
902 917
903int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 918int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
904{ 919{
905 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 920 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
906 u8 *addr = (u8 *) (regs->ip - 1); 921 u8 *addr = (u8 *) (regs->ip - 1);
907 unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_rsp);
908 struct jprobe *jp = container_of(p, struct jprobe, kp); 922 struct jprobe *jp = container_of(p, struct jprobe, kp);
909 923
910 if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) { 924 if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) {
911 if ((unsigned long *)regs->sp != kcb->jprobe_saved_rsp) { 925 if (stack_addr(regs) != kcb->jprobe_saved_sp) {
912 struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; 926 struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
913 printk("current sp %p does not match saved sp %p\n", 927 printk("current sp %p does not match saved sp %p\n",
914 (long *)regs->sp, kcb->jprobe_saved_rsp); 928 stack_addr(regs), kcb->jprobe_saved_sp);
915 printk("Saved registers for jprobe %p\n", jp); 929 printk("Saved registers for jprobe %p\n", jp);
916 show_registers(saved_regs); 930 show_registers(saved_regs);
917 printk("Current registers\n"); 931 printk("Current registers\n");
@@ -919,8 +933,9 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
919 BUG(); 933 BUG();
920 } 934 }
921 *regs = kcb->jprobe_saved_regs; 935 *regs = kcb->jprobe_saved_regs;
922 memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack, 936 memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
923 MIN_STACK_SIZE(stack_addr)); 937 kcb->jprobes_stack,
938 MIN_STACK_SIZE(kcb->jprobe_saved_sp));
924 preempt_enable_no_resched(); 939 preempt_enable_no_resched();
925 return 1; 940 return 1;
926 } 941 }