author    Prasanna S Panchamukhi <prasanna@in.ibm.com>    2006-04-19 01:22:00 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>    2006-04-19 12:13:53 -0400
commit    3b60211c1618063cb296439ebaef2041a725ba20 (patch)
tree      5de7fd33aeac8397df4e28aea50eb2bb34829de9
parent    34c37e18696ff6a773f0403348342a9fe49df4af (diff)
[PATCH] Switch Kprobes inline functions to __kprobes for x86_64
Andrew Morton pointed out that the compiler might not inline the functions marked inline in kprobes, thereby allowing probes to be inserted on these kprobes routines, which might cause recursion. This patch removes such inline markers and adds the routines to the kprobes section, thereby disallowing probes on them. Some of the routines can still be inlined, since they are executed only after kprobes has done the necessary setup for reentrancy.

Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--    arch/x86_64/kernel/kprobes.c    10
1 file changed, 5 insertions(+), 5 deletions(-)
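For context: in kernels of this vintage, __kprobes is a section attribute that places the marked function in .kprobes.text, a region the kprobes core refuses to plant probes in. A simplified sketch of the mechanism (not the verbatim kernel definition; the example function is hypothetical):

/* Simplified sketch, not the verbatim kernel source: __kprobes puts the
 * function into the .kprobes.text section, which the kprobes core treats
 * as off-limits for probe insertion. */
#define __kprobes __attribute__((__section__(".kprobes.text")))

/* Hypothetical example: a helper that runs from the kprobe trap path
 * and therefore must never itself be probed. */
static void __kprobes example_helper(void)
{
	/* work done on behalf of the active kprobe */
}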
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index accbff3fec49..1eaa5dae6174 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -53,7 +53,7 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 /*
  * returns non-zero if opcode modifies the interrupt flag.
  */
-static inline int is_IF_modifier(kprobe_opcode_t *insn)
+static __always_inline int is_IF_modifier(kprobe_opcode_t *insn)
 {
 	switch (*insn) {
 	case 0xfa:		/* cli */
@@ -84,7 +84,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
  * If it does, return the address of the 32-bit displacement word.
  * If not, return null.
  */
-static inline s32 *is_riprel(u8 *insn)
+static s32 __kprobes *is_riprel(u8 *insn)
 {
 #define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf) \
 	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
@@ -229,7 +229,7 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
 	mutex_unlock(&kprobe_mutex);
 }
 
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	kcb->prev_kprobe.kp = kprobe_running();
 	kcb->prev_kprobe.status = kcb->kprobe_status;
@@ -237,7 +237,7 @@ static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
 	kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags;
 }
 
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
 	kcb->kprobe_status = kcb->prev_kprobe.status;
@@ -245,7 +245,7 @@ static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 	kcb->kprobe_saved_rflags = kcb->prev_kprobe.saved_rflags;
 }
 
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = p;
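The reason these un-inlined helpers have to live in the kprobes section is that they run from inside the kprobe exception path; a probe placed on one of them would re-enter that path before its state had been saved. A rough sketch of how registration can reject such addresses, using linker-provided section bounds (names approximate, assumed for illustration; not the verbatim kernel code):

/* Rough sketch, assumed for illustration: reject probe addresses that
 * fall inside the .kprobes.text section delimited by linker symbols. */
extern char __kprobes_text_start[], __kprobes_text_end[];

static int in_kprobes_text(unsigned long addr)
{
	return addr >= (unsigned long)__kprobes_text_start &&
	       addr < (unsigned long)__kprobes_text_end;
}

/* register_kprobe() can then bail out early for such addresses,
 * e.g. returning -EINVAL before arming the probe. */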