diff options
Diffstat (limited to 'arch/i386/kernel/kprobes.c')
| -rw-r--r-- | arch/i386/kernel/kprobes.c | 180 |
1 files changed, 95 insertions, 85 deletions
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c index 6345b430b105..32b0c24ab9a6 100644 --- a/arch/i386/kernel/kprobes.c +++ b/arch/i386/kernel/kprobes.c | |||
| @@ -31,22 +31,16 @@ | |||
| 31 | #include <linux/config.h> | 31 | #include <linux/config.h> |
| 32 | #include <linux/kprobes.h> | 32 | #include <linux/kprobes.h> |
| 33 | #include <linux/ptrace.h> | 33 | #include <linux/ptrace.h> |
| 34 | #include <linux/spinlock.h> | ||
| 35 | #include <linux/preempt.h> | 34 | #include <linux/preempt.h> |
| 36 | #include <asm/cacheflush.h> | 35 | #include <asm/cacheflush.h> |
| 37 | #include <asm/kdebug.h> | 36 | #include <asm/kdebug.h> |
| 38 | #include <asm/desc.h> | 37 | #include <asm/desc.h> |
| 39 | 38 | ||
| 40 | static struct kprobe *current_kprobe; | ||
| 41 | static unsigned long kprobe_status, kprobe_old_eflags, kprobe_saved_eflags; | ||
| 42 | static struct kprobe *kprobe_prev; | ||
| 43 | static unsigned long kprobe_status_prev, kprobe_old_eflags_prev, kprobe_saved_eflags_prev; | ||
| 44 | static struct pt_regs jprobe_saved_regs; | ||
| 45 | static long *jprobe_saved_esp; | ||
| 46 | /* copy of the kernel stack at the probe fire time */ | ||
| 47 | static kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE]; | ||
| 48 | void jprobe_return_end(void); | 39 | void jprobe_return_end(void); |
| 49 | 40 | ||
| 41 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; | ||
| 42 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | ||
| 43 | |||
| 50 | /* | 44 | /* |
| 51 | * returns non-zero if opcode modifies the interrupt flag. | 45 | * returns non-zero if opcode modifies the interrupt flag. |
| 52 | */ | 46 | */ |
| @@ -91,29 +85,30 @@ void __kprobes arch_remove_kprobe(struct kprobe *p) | |||
| 91 | { | 85 | { |
| 92 | } | 86 | } |
| 93 | 87 | ||
| 94 | static inline void save_previous_kprobe(void) | 88 | static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb) |
| 95 | { | 89 | { |
| 96 | kprobe_prev = current_kprobe; | 90 | kcb->prev_kprobe.kp = kprobe_running(); |
| 97 | kprobe_status_prev = kprobe_status; | 91 | kcb->prev_kprobe.status = kcb->kprobe_status; |
| 98 | kprobe_old_eflags_prev = kprobe_old_eflags; | 92 | kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags; |
| 99 | kprobe_saved_eflags_prev = kprobe_saved_eflags; | 93 | kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags; |
| 100 | } | 94 | } |
| 101 | 95 | ||
| 102 | static inline void restore_previous_kprobe(void) | 96 | static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb) |
| 103 | { | 97 | { |
| 104 | current_kprobe = kprobe_prev; | 98 | __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; |
| 105 | kprobe_status = kprobe_status_prev; | 99 | kcb->kprobe_status = kcb->prev_kprobe.status; |
| 106 | kprobe_old_eflags = kprobe_old_eflags_prev; | 100 | kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags; |
| 107 | kprobe_saved_eflags = kprobe_saved_eflags_prev; | 101 | kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags; |
| 108 | } | 102 | } |
| 109 | 103 | ||
| 110 | static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs) | 104 | static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, |
| 105 | struct kprobe_ctlblk *kcb) | ||
| 111 | { | 106 | { |
| 112 | current_kprobe = p; | 107 | __get_cpu_var(current_kprobe) = p; |
| 113 | kprobe_saved_eflags = kprobe_old_eflags | 108 | kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags |
| 114 | = (regs->eflags & (TF_MASK | IF_MASK)); | 109 | = (regs->eflags & (TF_MASK | IF_MASK)); |
| 115 | if (is_IF_modifier(p->opcode)) | 110 | if (is_IF_modifier(p->opcode)) |
| 116 | kprobe_saved_eflags &= ~IF_MASK; | 111 | kcb->kprobe_saved_eflags &= ~IF_MASK; |
| 117 | } | 112 | } |
| 118 | 113 | ||
| 119 | static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | 114 | static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) |
| @@ -127,6 +122,7 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | |||
| 127 | regs->eip = (unsigned long)&p->ainsn.insn; | 122 | regs->eip = (unsigned long)&p->ainsn.insn; |
| 128 | } | 123 | } |
| 129 | 124 | ||
| 125 | /* Called with kretprobe_lock held */ | ||
| 130 | void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, | 126 | void __kprobes arch_prepare_kretprobe(struct kretprobe *rp, |
| 131 | struct pt_regs *regs) | 127 | struct pt_regs *regs) |
| 132 | { | 128 | { |
| @@ -157,9 +153,15 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
| 157 | int ret = 0; | 153 | int ret = 0; |
| 158 | kprobe_opcode_t *addr = NULL; | 154 | kprobe_opcode_t *addr = NULL; |
| 159 | unsigned long *lp; | 155 | unsigned long *lp; |
| 156 | struct kprobe_ctlblk *kcb; | ||
| 160 | 157 | ||
| 161 | /* We're in an interrupt, but this is clear and BUG()-safe. */ | 158 | /* |
| 159 | * We don't want to be preempted for the entire | ||
| 160 | * duration of kprobe processing | ||
| 161 | */ | ||
| 162 | preempt_disable(); | 162 | preempt_disable(); |
| 163 | kcb = get_kprobe_ctlblk(); | ||
| 164 | |||
| 163 | /* Check if the application is using LDT entry for its code segment and | 165 | /* Check if the application is using LDT entry for its code segment and |
| 164 | * calculate the address by reading the base address from the LDT entry. | 166 | * calculate the address by reading the base address from the LDT entry. |
| 165 | */ | 167 | */ |
| @@ -173,15 +175,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
| 173 | } | 175 | } |
| 174 | /* Check we're not actually recursing */ | 176 | /* Check we're not actually recursing */ |
| 175 | if (kprobe_running()) { | 177 | if (kprobe_running()) { |
| 176 | /* We *are* holding lock here, so this is safe. | ||
| 177 | Disarm the probe we just hit, and ignore it. */ | ||
| 178 | p = get_kprobe(addr); | 178 | p = get_kprobe(addr); |
| 179 | if (p) { | 179 | if (p) { |
| 180 | if (kprobe_status == KPROBE_HIT_SS && | 180 | if (kcb->kprobe_status == KPROBE_HIT_SS && |
| 181 | *p->ainsn.insn == BREAKPOINT_INSTRUCTION) { | 181 | *p->ainsn.insn == BREAKPOINT_INSTRUCTION) { |
| 182 | regs->eflags &= ~TF_MASK; | 182 | regs->eflags &= ~TF_MASK; |
| 183 | regs->eflags |= kprobe_saved_eflags; | 183 | regs->eflags |= kcb->kprobe_saved_eflags; |
| 184 | unlock_kprobes(); | ||
| 185 | goto no_kprobe; | 184 | goto no_kprobe; |
| 186 | } | 185 | } |
| 187 | /* We have reentered the kprobe_handler(), since | 186 | /* We have reentered the kprobe_handler(), since |
| @@ -190,26 +189,23 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
| 190 | * just single step on the instruction of the new probe | 189 | * just single step on the instruction of the new probe |
| 191 | * without calling any user handlers. | 190 | * without calling any user handlers. |
| 192 | */ | 191 | */ |
| 193 | save_previous_kprobe(); | 192 | save_previous_kprobe(kcb); |
| 194 | set_current_kprobe(p, regs); | 193 | set_current_kprobe(p, regs, kcb); |
| 195 | p->nmissed++; | 194 | p->nmissed++; |
| 196 | prepare_singlestep(p, regs); | 195 | prepare_singlestep(p, regs); |
| 197 | kprobe_status = KPROBE_REENTER; | 196 | kcb->kprobe_status = KPROBE_REENTER; |
| 198 | return 1; | 197 | return 1; |
| 199 | } else { | 198 | } else { |
| 200 | p = current_kprobe; | 199 | p = __get_cpu_var(current_kprobe); |
| 201 | if (p->break_handler && p->break_handler(p, regs)) { | 200 | if (p->break_handler && p->break_handler(p, regs)) { |
| 202 | goto ss_probe; | 201 | goto ss_probe; |
| 203 | } | 202 | } |
| 204 | } | 203 | } |
| 205 | /* If it's not ours, can't be delete race, (we hold lock). */ | ||
| 206 | goto no_kprobe; | 204 | goto no_kprobe; |
| 207 | } | 205 | } |
| 208 | 206 | ||
| 209 | lock_kprobes(); | ||
| 210 | p = get_kprobe(addr); | 207 | p = get_kprobe(addr); |
| 211 | if (!p) { | 208 | if (!p) { |
| 212 | unlock_kprobes(); | ||
| 213 | if (regs->eflags & VM_MASK) { | 209 | if (regs->eflags & VM_MASK) { |
| 214 | /* We are in virtual-8086 mode. Return 0 */ | 210 | /* We are in virtual-8086 mode. Return 0 */ |
| 215 | goto no_kprobe; | 211 | goto no_kprobe; |
| @@ -232,8 +228,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
| 232 | goto no_kprobe; | 228 | goto no_kprobe; |
| 233 | } | 229 | } |
| 234 | 230 | ||
| 235 | kprobe_status = KPROBE_HIT_ACTIVE; | 231 | set_current_kprobe(p, regs, kcb); |
| 236 | set_current_kprobe(p, regs); | 232 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; |
| 237 | 233 | ||
| 238 | if (p->pre_handler && p->pre_handler(p, regs)) | 234 | if (p->pre_handler && p->pre_handler(p, regs)) |
| 239 | /* handler has already set things up, so skip ss setup */ | 235 | /* handler has already set things up, so skip ss setup */ |
| @@ -241,7 +237,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
| 241 | 237 | ||
| 242 | ss_probe: | 238 | ss_probe: |
| 243 | prepare_singlestep(p, regs); | 239 | prepare_singlestep(p, regs); |
| 244 | kprobe_status = KPROBE_HIT_SS; | 240 | kcb->kprobe_status = KPROBE_HIT_SS; |
| 245 | return 1; | 241 | return 1; |
| 246 | 242 | ||
| 247 | no_kprobe: | 243 | no_kprobe: |
| @@ -269,9 +265,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | |||
| 269 | struct kretprobe_instance *ri = NULL; | 265 | struct kretprobe_instance *ri = NULL; |
| 270 | struct hlist_head *head; | 266 | struct hlist_head *head; |
| 271 | struct hlist_node *node, *tmp; | 267 | struct hlist_node *node, *tmp; |
| 272 | unsigned long orig_ret_address = 0; | 268 | unsigned long flags, orig_ret_address = 0; |
| 273 | unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; | 269 | unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; |
| 274 | 270 | ||
| 271 | spin_lock_irqsave(&kretprobe_lock, flags); | ||
| 275 | head = kretprobe_inst_table_head(current); | 272 | head = kretprobe_inst_table_head(current); |
| 276 | 273 | ||
| 277 | /* | 274 | /* |
| @@ -310,14 +307,15 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | |||
| 310 | BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); | 307 | BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); |
| 311 | regs->eip = orig_ret_address; | 308 | regs->eip = orig_ret_address; |
| 312 | 309 | ||
| 313 | unlock_kprobes(); | 310 | reset_current_kprobe(); |
| 311 | spin_unlock_irqrestore(&kretprobe_lock, flags); | ||
| 314 | preempt_enable_no_resched(); | 312 | preempt_enable_no_resched(); |
| 315 | 313 | ||
| 316 | /* | 314 | /* |
| 317 | * By returning a non-zero value, we are telling | 315 | * By returning a non-zero value, we are telling |
| 318 | * kprobe_handler() that we have handled unlocking | 316 | * kprobe_handler() that we don't want the post_handler |
| 319 | * and re-enabling preemption. | 317 | * to run (and have re-enabled preemption) |
| 320 | */ | 318 | */ |
| 321 | return 1; | 319 | return 1; |
| 322 | } | 320 | } |
| 323 | 321 | ||
| @@ -343,7 +341,8 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | |||
| 343 | * that is atop the stack is the address following the copied instruction. | 341 | * that is atop the stack is the address following the copied instruction. |
| 344 | * We need to make it the address following the original instruction. | 342 | * We need to make it the address following the original instruction. |
| 345 | */ | 343 | */ |
| 346 | static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) | 344 | static void __kprobes resume_execution(struct kprobe *p, |
| 345 | struct pt_regs *regs, struct kprobe_ctlblk *kcb) | ||
| 347 | { | 346 | { |
| 348 | unsigned long *tos = (unsigned long *)®s->esp; | 347 | unsigned long *tos = (unsigned long *)®s->esp; |
| 349 | unsigned long next_eip = 0; | 348 | unsigned long next_eip = 0; |
| @@ -353,7 +352,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) | |||
| 353 | switch (p->ainsn.insn[0]) { | 352 | switch (p->ainsn.insn[0]) { |
| 354 | case 0x9c: /* pushfl */ | 353 | case 0x9c: /* pushfl */ |
| 355 | *tos &= ~(TF_MASK | IF_MASK); | 354 | *tos &= ~(TF_MASK | IF_MASK); |
| 356 | *tos |= kprobe_old_eflags; | 355 | *tos |= kcb->kprobe_old_eflags; |
| 357 | break; | 356 | break; |
| 358 | case 0xc3: /* ret/lret */ | 357 | case 0xc3: /* ret/lret */ |
| 359 | case 0xcb: | 358 | case 0xcb: |
| @@ -394,27 +393,30 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) | |||
| 394 | 393 | ||
| 395 | /* | 394 | /* |
| 396 | * Interrupts are disabled on entry as trap1 is an interrupt gate and they | 395 | * Interrupts are disabled on entry as trap1 is an interrupt gate and they |
| 397 | * remain disabled throughout this function. And we hold kprobe lock. | 396 | * remain disabled throughout this function. |
| 398 | */ | 397 | */ |
| 399 | static inline int post_kprobe_handler(struct pt_regs *regs) | 398 | static inline int post_kprobe_handler(struct pt_regs *regs) |
| 400 | { | 399 | { |
| 401 | if (!kprobe_running()) | 400 | struct kprobe *cur = kprobe_running(); |
| 401 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
| 402 | |||
| 403 | if (!cur) | ||
| 402 | return 0; | 404 | return 0; |
| 403 | 405 | ||
| 404 | if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) { | 406 | if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { |
| 405 | kprobe_status = KPROBE_HIT_SSDONE; | 407 | kcb->kprobe_status = KPROBE_HIT_SSDONE; |
| 406 | current_kprobe->post_handler(current_kprobe, regs, 0); | 408 | cur->post_handler(cur, regs, 0); |
| 407 | } | 409 | } |
| 408 | 410 | ||
| 409 | resume_execution(current_kprobe, regs); | 411 | resume_execution(cur, regs, kcb); |
| 410 | regs->eflags |= kprobe_saved_eflags; | 412 | regs->eflags |= kcb->kprobe_saved_eflags; |
| 411 | 413 | ||
| 412 | /*Restore back the original saved kprobes variables and continue. */ | 414 | /*Restore back the original saved kprobes variables and continue. */ |
| 413 | if (kprobe_status == KPROBE_REENTER) { | 415 | if (kcb->kprobe_status == KPROBE_REENTER) { |
| 414 | restore_previous_kprobe(); | 416 | restore_previous_kprobe(kcb); |
| 415 | goto out; | 417 | goto out; |
| 416 | } | 418 | } |
| 417 | unlock_kprobes(); | 419 | reset_current_kprobe(); |
| 418 | out: | 420 | out: |
| 419 | preempt_enable_no_resched(); | 421 | preempt_enable_no_resched(); |
| 420 | 422 | ||
| @@ -429,18 +431,19 @@ out: | |||
| 429 | return 1; | 431 | return 1; |
| 430 | } | 432 | } |
| 431 | 433 | ||
| 432 | /* Interrupts disabled, kprobe_lock held. */ | ||
| 433 | static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) | 434 | static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) |
| 434 | { | 435 | { |
| 435 | if (current_kprobe->fault_handler | 436 | struct kprobe *cur = kprobe_running(); |
| 436 | && current_kprobe->fault_handler(current_kprobe, regs, trapnr)) | 437 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
| 438 | |||
| 439 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | ||
| 437 | return 1; | 440 | return 1; |
| 438 | 441 | ||
| 439 | if (kprobe_status & KPROBE_HIT_SS) { | 442 | if (kcb->kprobe_status & KPROBE_HIT_SS) { |
| 440 | resume_execution(current_kprobe, regs); | 443 | resume_execution(cur, regs, kcb); |
| 441 | regs->eflags |= kprobe_old_eflags; | 444 | regs->eflags |= kcb->kprobe_old_eflags; |
| 442 | 445 | ||
| 443 | unlock_kprobes(); | 446 | reset_current_kprobe(); |
| 444 | preempt_enable_no_resched(); | 447 | preempt_enable_no_resched(); |
| 445 | } | 448 | } |
| 446 | return 0; | 449 | return 0; |
| @@ -453,39 +456,41 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
| 453 | unsigned long val, void *data) | 456 | unsigned long val, void *data) |
| 454 | { | 457 | { |
| 455 | struct die_args *args = (struct die_args *)data; | 458 | struct die_args *args = (struct die_args *)data; |
| 459 | int ret = NOTIFY_DONE; | ||
| 460 | |||
| 456 | switch (val) { | 461 | switch (val) { |
| 457 | case DIE_INT3: | 462 | case DIE_INT3: |
| 458 | if (kprobe_handler(args->regs)) | 463 | if (kprobe_handler(args->regs)) |
| 459 | return NOTIFY_STOP; | 464 | ret = NOTIFY_STOP; |
| 460 | break; | 465 | break; |
| 461 | case DIE_DEBUG: | 466 | case DIE_DEBUG: |
| 462 | if (post_kprobe_handler(args->regs)) | 467 | if (post_kprobe_handler(args->regs)) |
| 463 | return NOTIFY_STOP; | 468 | ret = NOTIFY_STOP; |
| 464 | break; | 469 | break; |
| 465 | case DIE_GPF: | 470 | case DIE_GPF: |
| 466 | if (kprobe_running() && | ||
| 467 | kprobe_fault_handler(args->regs, args->trapnr)) | ||
| 468 | return NOTIFY_STOP; | ||
| 469 | break; | ||
| 470 | case DIE_PAGE_FAULT: | 471 | case DIE_PAGE_FAULT: |
| 472 | /* kprobe_running() needs smp_processor_id() */ | ||
| 473 | preempt_disable(); | ||
| 471 | if (kprobe_running() && | 474 | if (kprobe_running() && |
| 472 | kprobe_fault_handler(args->regs, args->trapnr)) | 475 | kprobe_fault_handler(args->regs, args->trapnr)) |
| 473 | return NOTIFY_STOP; | 476 | ret = NOTIFY_STOP; |
| 477 | preempt_enable(); | ||
| 474 | break; | 478 | break; |
| 475 | default: | 479 | default: |
| 476 | break; | 480 | break; |
| 477 | } | 481 | } |
| 478 | return NOTIFY_DONE; | 482 | return ret; |
| 479 | } | 483 | } |
| 480 | 484 | ||
| 481 | int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | 485 | int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) |
| 482 | { | 486 | { |
| 483 | struct jprobe *jp = container_of(p, struct jprobe, kp); | 487 | struct jprobe *jp = container_of(p, struct jprobe, kp); |
| 484 | unsigned long addr; | 488 | unsigned long addr; |
| 489 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
| 485 | 490 | ||
| 486 | jprobe_saved_regs = *regs; | 491 | kcb->jprobe_saved_regs = *regs; |
| 487 | jprobe_saved_esp = ®s->esp; | 492 | kcb->jprobe_saved_esp = ®s->esp; |
| 488 | addr = (unsigned long)jprobe_saved_esp; | 493 | addr = (unsigned long)(kcb->jprobe_saved_esp); |
| 489 | 494 | ||
| 490 | /* | 495 | /* |
| 491 | * TBD: As Linus pointed out, gcc assumes that the callee | 496 | * TBD: As Linus pointed out, gcc assumes that the callee |
| @@ -494,7 +499,8 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
| 494 | * we also save and restore enough stack bytes to cover | 499 | * we also save and restore enough stack bytes to cover |
| 495 | * the argument area. | 500 | * the argument area. |
| 496 | */ | 501 | */ |
| 497 | memcpy(jprobes_stack, (kprobe_opcode_t *) addr, MIN_STACK_SIZE(addr)); | 502 | memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, |
| 503 | MIN_STACK_SIZE(addr)); | ||
| 498 | regs->eflags &= ~IF_MASK; | 504 | regs->eflags &= ~IF_MASK; |
| 499 | regs->eip = (unsigned long)(jp->entry); | 505 | regs->eip = (unsigned long)(jp->entry); |
| 500 | return 1; | 506 | return 1; |
| @@ -502,36 +508,40 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
| 502 | 508 | ||
| 503 | void __kprobes jprobe_return(void) | 509 | void __kprobes jprobe_return(void) |
| 504 | { | 510 | { |
| 505 | preempt_enable_no_resched(); | 511 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
| 512 | |||
| 506 | asm volatile (" xchgl %%ebx,%%esp \n" | 513 | asm volatile (" xchgl %%ebx,%%esp \n" |
| 507 | " int3 \n" | 514 | " int3 \n" |
| 508 | " .globl jprobe_return_end \n" | 515 | " .globl jprobe_return_end \n" |
| 509 | " jprobe_return_end: \n" | 516 | " jprobe_return_end: \n" |
| 510 | " nop \n"::"b" | 517 | " nop \n"::"b" |
| 511 | (jprobe_saved_esp):"memory"); | 518 | (kcb->jprobe_saved_esp):"memory"); |
| 512 | } | 519 | } |
| 513 | 520 | ||
| 514 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | 521 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) |
| 515 | { | 522 | { |
| 523 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
| 516 | u8 *addr = (u8 *) (regs->eip - 1); | 524 | u8 *addr = (u8 *) (regs->eip - 1); |
| 517 | unsigned long stack_addr = (unsigned long)jprobe_saved_esp; | 525 | unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_esp); |
| 518 | struct jprobe *jp = container_of(p, struct jprobe, kp); | 526 | struct jprobe *jp = container_of(p, struct jprobe, kp); |
| 519 | 527 | ||
| 520 | if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) { | 528 | if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) { |
| 521 | if (®s->esp != jprobe_saved_esp) { | 529 | if (®s->esp != kcb->jprobe_saved_esp) { |
| 522 | struct pt_regs *saved_regs = | 530 | struct pt_regs *saved_regs = |
| 523 | container_of(jprobe_saved_esp, struct pt_regs, esp); | 531 | container_of(kcb->jprobe_saved_esp, |
| 532 | struct pt_regs, esp); | ||
| 524 | printk("current esp %p does not match saved esp %p\n", | 533 | printk("current esp %p does not match saved esp %p\n", |
| 525 | ®s->esp, jprobe_saved_esp); | 534 | ®s->esp, kcb->jprobe_saved_esp); |
| 526 | printk("Saved registers for jprobe %p\n", jp); | 535 | printk("Saved registers for jprobe %p\n", jp); |
| 527 | show_registers(saved_regs); | 536 | show_registers(saved_regs); |
| 528 | printk("Current registers\n"); | 537 | printk("Current registers\n"); |
| 529 | show_registers(regs); | 538 | show_registers(regs); |
| 530 | BUG(); | 539 | BUG(); |
| 531 | } | 540 | } |
| 532 | *regs = jprobe_saved_regs; | 541 | *regs = kcb->jprobe_saved_regs; |
| 533 | memcpy((kprobe_opcode_t *) stack_addr, jprobes_stack, | 542 | memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack, |
| 534 | MIN_STACK_SIZE(stack_addr)); | 543 | MIN_STACK_SIZE(stack_addr)); |
| 544 | preempt_enable_no_resched(); | ||
| 535 | return 1; | 545 | return 1; |
| 536 | } | 546 | } |
| 537 | return 0; | 547 | return 0; |
