author		Prasanna S Panchamukhi <prasanna@in.ibm.com>	2005-09-06 18:19:28 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>		2005-09-07 19:57:59 -0400
commit		0f2fbdcbb041f9087da42f8ac2e81f2817098d2a
tree		3f54f91ca6972c6567cfe529b33fafb622b2d51c	/arch/x86_64/kernel/kprobes.c
parent		3d97ae5b958855ac007b6f56a0f94ab8ade09e9e
[PATCH] kprobes: prevent possible race conditions x86_64 changes
This patch contains the x86_64 architecture-specific changes to prevent
possible race conditions.
Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
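
The annotation added throughout this diff, __kprobes, works by linking the marked functions into a dedicated .kprobes.text section rather than regular .text, so the kprobes core can decline to plant probes inside its own machinery; probing routines such as kprobe_handler() from within a probe hit is the kind of recursion/race this series closes off. A rough sketch of the mechanism is below; the real definition lives on the generic side of this series (include/linux/kprobes.h), so treat the exact form here as an approximation rather than the kernel's definition:

	/* Sketch: push the annotated function into the protected section. */
	#define __kprobes	__attribute__((__section__(".kprobes.text")))

	/* Example use, matching the changes in the diff below: */
	int __kprobes kprobe_handler(struct pt_regs *regs);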
Diffstat (limited to 'arch/x86_64/kernel/kprobes.c')
-rw-r--r--	arch/x86_64/kernel/kprobes.c	35
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index 5c6dc7051482..c21cceaea275 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -74,7 +74,7 @@ static inline int is_IF_modifier(kprobe_opcode_t *insn)
 	return 0;
 }
 
-int arch_prepare_kprobe(struct kprobe *p)
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	/* insn: must be on special executable page on x86_64. */
 	up(&kprobe_mutex);
@@ -189,7 +189,7 @@ static inline s32 *is_riprel(u8 *insn)
 	return NULL;
 }
 
-void arch_copy_kprobe(struct kprobe *p)
+void __kprobes arch_copy_kprobe(struct kprobe *p)
 {
 	s32 *ripdisp;
 	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
@@ -215,21 +215,21 @@ void arch_copy_kprobe(struct kprobe *p)
 	p->opcode = *p->addr;
 }
 
-void arch_arm_kprobe(struct kprobe *p)
+void __kprobes arch_arm_kprobe(struct kprobe *p)
 {
 	*p->addr = BREAKPOINT_INSTRUCTION;
 	flush_icache_range((unsigned long) p->addr,
 			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
 }
 
-void arch_disarm_kprobe(struct kprobe *p)
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
 	*p->addr = p->opcode;
 	flush_icache_range((unsigned long) p->addr,
 			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
 }
 
-void arch_remove_kprobe(struct kprobe *p)
+void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
 	up(&kprobe_mutex);
 	free_insn_slot(p->ainsn.insn);
@@ -261,7 +261,7 @@ static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs)
 	kprobe_saved_rflags &= ~IF_MASK;
 }
 
-static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
 	regs->eflags |= TF_MASK;
 	regs->eflags &= ~IF_MASK;
@@ -272,7 +272,8 @@ static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	regs->rip = (unsigned long)p->ainsn.insn;
 }
 
-void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+				      struct pt_regs *regs)
 {
 	unsigned long *sara = (unsigned long *)regs->rsp;
 	struct kretprobe_instance *ri;
@@ -295,7 +296,7 @@ void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
  * Interrupts are disabled on entry as trap3 is an interrupt gate and they
  * remain disabled thorough out this function.
  */
-int kprobe_handler(struct pt_regs *regs)
+int __kprobes kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p;
 	int ret = 0;
@@ -399,7 +400,7 @@ no_kprobe:
 /*
  * Called when we hit the probe point at kretprobe_trampoline
  */
-int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head;
@@ -478,7 +479,7 @@ int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
  * that is atop the stack is the address following the copied instruction.
  * We need to make it the address following the original instruction.
  */
-static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
 	unsigned long *tos = (unsigned long *)regs->rsp;
 	unsigned long next_rip = 0;
@@ -536,7 +537,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs)
  * Interrupts are disabled on entry as trap1 is an interrupt gate and they
  * remain disabled thoroughout this function. And we hold kprobe lock.
  */
-int post_kprobe_handler(struct pt_regs *regs)
+int __kprobes post_kprobe_handler(struct pt_regs *regs)
 {
 	if (!kprobe_running())
 		return 0;
@@ -571,7 +572,7 @@ out:
 }
 
 /* Interrupts disabled, kprobe_lock held. */
-int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	if (current_kprobe->fault_handler
 	    && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
@@ -590,8 +591,8 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 /*
  * Wrapper routine for handling exceptions.
  */
-int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
-			     void *data)
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+				       unsigned long val, void *data)
 {
 	struct die_args *args = (struct die_args *)data;
 	switch (val) {
@@ -619,7 +620,7 @@ int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
 	return NOTIFY_DONE;
 }
 
-int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
 	unsigned long addr;
@@ -640,7 +641,7 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	return 1;
 }
 
-void jprobe_return(void)
+void __kprobes jprobe_return(void)
 {
 	preempt_enable_no_resched();
 	asm volatile (" xchg %%rbx,%%rsp \n"
@@ -651,7 +652,7 @@ void jprobe_return(void)
 		      (jprobe_saved_rsp):"memory");
 }
 
-int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	u8 *addr = (u8 *) (regs->rip - 1);
 	unsigned long stack_addr = (unsigned long)jprobe_saved_rsp;
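
For completeness, the generic kprobes code can then reject probe registration on any address that falls inside .kprobes.text, which is what makes the annotations above effective. A minimal sketch of that guard, assuming linker-provided markers __kprobes_text_start and __kprobes_text_end around the section (names assumed for illustration, not taken from this diff):

	/* Sketch: refuse to register a probe on kprobes' own code. */
	extern char __kprobes_text_start[], __kprobes_text_end[];

	static int in_kprobes_functions(unsigned long addr)
	{
		if (addr >= (unsigned long)__kprobes_text_start &&
		    addr < (unsigned long)__kprobes_text_end)
			return -EINVAL;	/* address lies in .kprobes.text */
		return 0;
	}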