Diffstat (limited to 'arch/ppc64/kernel/kprobes.c')
-rw-r--r--	arch/ppc64/kernel/kprobes.c	138
1 file changed, 73 insertions, 65 deletions
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c
index ed876a5178ae..511af54e6230 100644
--- a/arch/ppc64/kernel/kprobes.c
+++ b/arch/ppc64/kernel/kprobes.c
@@ -30,19 +30,14 @@
 #include <linux/config.h>
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
-#include <linux/spinlock.h>
 #include <linux/preempt.h>
 #include <asm/cacheflush.h>
 #include <asm/kdebug.h>
 #include <asm/sstep.h>
 
 static DECLARE_MUTEX(kprobe_mutex);
-
-static struct kprobe *current_kprobe;
-static unsigned long kprobe_status, kprobe_saved_msr;
-static struct kprobe *kprobe_prev;
-static unsigned long kprobe_status_prev, kprobe_saved_msr_prev;
-static struct pt_regs jprobe_saved_regs;
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
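The two DEFINE_PER_CPU declarations above replace five file-static globals. For context, here is a sketch of the per-CPU control block they reference; its real definition lives in the ppc64 asm/kprobes.h header, which is not part of this diff, so the exact layout below is an assumption inferred from how the fields are used in this file.

/* Sketch only; see include/asm-ppc64/kprobes.h for the real definition. */
struct prev_kprobe {
	struct kprobe *kp;		/* probe that was interrupted */
	unsigned long status;		/* its KPROBE_* state */
	unsigned long saved_msr;	/* MSR captured when it hit */
};

struct kprobe_ctlblk {
	unsigned long kprobe_status;	/* state of the active probe */
	unsigned long kprobe_saved_msr;	/* MSR to restore after stepping */
	struct pt_regs jprobe_saved_regs;	/* stashed by setjmp_pre_handler() */
	struct prev_kprobe prev_kprobe;	/* one level of probe reentrancy */
};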
@@ -108,20 +103,28 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	regs->nip = (unsigned long)p->ainsn.insn;
 }
 
-static inline void save_previous_kprobe(void)
+static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+	kcb->prev_kprobe.kp = kprobe_running();
+	kcb->prev_kprobe.status = kcb->kprobe_status;
+	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
+}
+
+static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	kprobe_prev = current_kprobe;
-	kprobe_status_prev = kprobe_status;
-	kprobe_saved_msr_prev = kprobe_saved_msr;
+	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	kcb->kprobe_status = kcb->prev_kprobe.status;
+	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
 }
 
-static inline void restore_previous_kprobe(void)
+static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+				struct kprobe_ctlblk *kcb)
 {
-	current_kprobe = kprobe_prev;
-	kprobe_status = kprobe_status_prev;
-	kprobe_saved_msr = kprobe_saved_msr_prev;
+	__get_cpu_var(current_kprobe) = p;
+	kcb->kprobe_saved_msr = regs->msr;
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
 				struct pt_regs *regs)
 {
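The rewritten helpers lean on three accessors from the generic kprobes layer. They are defined in include/linux/kprobes.h rather than in this diff; the bodies below are a paraphrase for the reader, not the authoritative source.

/* Paraphrased from the generic kprobes header of this era. */
static inline struct kprobe *kprobe_running(void)
{
	return __get_cpu_var(current_kprobe);	/* NULL if no probe is active */
}

static inline void reset_current_kprobe(void)
{
	__get_cpu_var(current_kprobe) = NULL;
}

static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
{
	return &__get_cpu_var(kprobe_ctlblk);	/* caller must have preemption off */
}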
@@ -145,19 +148,24 @@ static inline int kprobe_handler(struct pt_regs *regs)
 	struct kprobe *p;
 	int ret = 0;
 	unsigned int *addr = (unsigned int *)regs->nip;
+	struct kprobe_ctlblk *kcb;
+
+	/*
+	 * We don't want to be preempted for the entire
+	 * duration of kprobe processing
+	 */
+	preempt_disable();
+	kcb = get_kprobe_ctlblk();
 
 	/* Check we're not actually recursing */
 	if (kprobe_running()) {
-		/* We *are* holding lock here, so this is safe.
-		   Disarm the probe we just hit, and ignore it. */
 		p = get_kprobe(addr);
 		if (p) {
 			kprobe_opcode_t insn = *p->ainsn.insn;
-			if (kprobe_status == KPROBE_HIT_SS &&
+			if (kcb->kprobe_status == KPROBE_HIT_SS &&
 					is_trap(insn)) {
 				regs->msr &= ~MSR_SE;
-				regs->msr |= kprobe_saved_msr;
-				unlock_kprobes();
+				regs->msr |= kcb->kprobe_saved_msr;
 				goto no_kprobe;
 			}
 			/* We have reentered the kprobe_handler(), since
@@ -166,27 +174,24 @@ static inline int kprobe_handler(struct pt_regs *regs)
 			 * just single step on the instruction of the new probe
 			 * without calling any user handlers.
 			 */
-			save_previous_kprobe();
-			current_kprobe = p;
-			kprobe_saved_msr = regs->msr;
+			save_previous_kprobe(kcb);
+			set_current_kprobe(p, regs, kcb);
+			kcb->kprobe_saved_msr = regs->msr;
 			p->nmissed++;
 			prepare_singlestep(p, regs);
-			kprobe_status = KPROBE_REENTER;
+			kcb->kprobe_status = KPROBE_REENTER;
 			return 1;
 		} else {
-			p = current_kprobe;
+			p = __get_cpu_var(current_kprobe);
 			if (p->break_handler && p->break_handler(p, regs)) {
 				goto ss_probe;
 			}
 		}
-		/* If it's not ours, can't be delete race, (we hold lock). */
 		goto no_kprobe;
 	}
 
-	lock_kprobes();
 	p = get_kprobe(addr);
 	if (!p) {
-		unlock_kprobes();
 		if (*addr != BREAKPOINT_INSTRUCTION) {
 			/*
 			 * PowerPC has multiple variants of the "trap"
@@ -209,24 +214,19 @@ static inline int kprobe_handler(struct pt_regs *regs)
 		goto no_kprobe;
 	}
 
-	kprobe_status = KPROBE_HIT_ACTIVE;
-	current_kprobe = p;
-	kprobe_saved_msr = regs->msr;
+	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+	set_current_kprobe(p, regs, kcb);
 	if (p->pre_handler && p->pre_handler(p, regs))
 		/* handler has already set things up, so skip ss setup */
 		return 1;
 
 ss_probe:
 	prepare_singlestep(p, regs);
-	kprobe_status = KPROBE_HIT_SS;
-	/*
-	 * This preempt_disable() matches the preempt_enable_no_resched()
-	 * in post_kprobe_handler().
-	 */
-	preempt_disable();
+	kcb->kprobe_status = KPROBE_HIT_SS;
 	return 1;
 
 no_kprobe:
+	preempt_enable_no_resched();
 	return ret;
 }
 
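The preempt_disable()/preempt_enable_no_resched() motion in the hunks above changes where the preempt count is balanced. A summary of the resulting contract, reconstructed purely from the hunks in this diff:

/*
 * kprobe_handler() now disables preemption unconditionally on entry,
 * and each exit path re-enables it exactly once:
 *   - the no_kprobe path re-enables it before returning;
 *   - returning 1 (probe hit) leaves it disabled until
 *     post_kprobe_handler(), kprobe_fault_handler() (KPROBE_HIT_SS),
 *     trampoline_probe_handler() or longjmp_break_handler() runs
 *     preempt_enable_no_resched().
 */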
@@ -251,9 +251,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	struct kretprobe_instance *ri = NULL;
 	struct hlist_head *head;
 	struct hlist_node *node, *tmp;
-	unsigned long orig_ret_address = 0;
+	unsigned long flags, orig_ret_address = 0;
 	unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline;
 
+	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(current);
 
 	/*
@@ -292,12 +293,14 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 	BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
 	regs->nip = orig_ret_address;
 
-	unlock_kprobes();
+	reset_current_kprobe();
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
+	preempt_enable_no_resched();
 
 	/*
 	 * By returning a non-zero value, we are telling
-	 * kprobe_handler() that we have handled unlocking
-	 * and re-enabling preemption.
+	 * kprobe_handler() that we don't want the post_handler
+	 * to run (and have re-enabled preemption)
 	 */
 	return 1;
 }
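The hlist walk elided between the two trampoline hunks runs over per-task kretprobe instances, now guarded by the generic kretprobe_lock spinlock instead of the removed global kprobe lock. For orientation, a sketch of the instance structure as the generic include/linux/kprobes.h of this era declares it; this is recalled context, not part of this diff.

struct kretprobe_instance {
	struct hlist_node uselist;	/* on rp->used_instances */
	struct hlist_node hlist;	/* on the per-task hash bucket */
	struct kretprobe *rp;		/* owning return probe */
	kprobe_opcode_t *ret_addr;	/* original return address */
	struct task_struct *task;	/* task this instance fired in */
};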
@@ -323,23 +326,26 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
 
 static inline int post_kprobe_handler(struct pt_regs *regs)
 {
-	if (!kprobe_running())
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (!cur)
 		return 0;
 
-	if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) {
-		kprobe_status = KPROBE_HIT_SSDONE;
-		current_kprobe->post_handler(current_kprobe, regs, 0);
+	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+		kcb->kprobe_status = KPROBE_HIT_SSDONE;
+		cur->post_handler(cur, regs, 0);
 	}
 
-	resume_execution(current_kprobe, regs);
-	regs->msr |= kprobe_saved_msr;
+	resume_execution(cur, regs);
+	regs->msr |= kcb->kprobe_saved_msr;
 
 	/*Restore back the original saved kprobes variables and continue. */
-	if (kprobe_status == KPROBE_REENTER) {
-		restore_previous_kprobe();
+	if (kcb->kprobe_status == KPROBE_REENTER) {
+		restore_previous_kprobe(kcb);
 		goto out;
 	}
-	unlock_kprobes();
+	reset_current_kprobe();
 out:
 	preempt_enable_no_resched();
 
@@ -354,19 +360,20 @@ out:
 	return 1;
 }
 
-/* Interrupts disabled, kprobe_lock held. */
 static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
-	if (current_kprobe->fault_handler
-	    && current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+	struct kprobe *cur = kprobe_running();
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
 		return 1;
 
-	if (kprobe_status & KPROBE_HIT_SS) {
-		resume_execution(current_kprobe, regs);
+	if (kcb->kprobe_status & KPROBE_HIT_SS) {
+		resume_execution(cur, regs);
 		regs->msr &= ~MSR_SE;
-		regs->msr |= kprobe_saved_msr;
+		regs->msr |= kcb->kprobe_saved_msr;
 
-		unlock_kprobes();
+		reset_current_kprobe();
 		preempt_enable_no_resched();
 	}
 	return 0;
@@ -381,11 +388,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	struct die_args *args = (struct die_args *)data;
 	int ret = NOTIFY_DONE;
 
-	/*
-	 * Interrupts are not disabled here. We need to disable
-	 * preemption, because kprobe_running() uses smp_processor_id().
-	 */
-	preempt_disable();
 	switch (val) {
 	case DIE_BPT:
 		if (kprobe_handler(args->regs))
@@ -396,22 +398,25 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 		ret = NOTIFY_STOP;
 		break;
 	case DIE_PAGE_FAULT:
+		/* kprobe_running() needs smp_processor_id() */
+		preempt_disable();
 		if (kprobe_running() &&
 		    kprobe_fault_handler(args->regs, args->trapnr))
 			ret = NOTIFY_STOP;
+		preempt_enable();
 		break;
 	default:
 		break;
 	}
-	preempt_enable_no_resched();
 	return ret;
 }
 
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct jprobe *jp = container_of(p, struct jprobe, kp);
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
 
-	memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));
+	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
 
 	/* setup return addr to the jprobe handler routine */
 	regs->nip = (unsigned long)(((func_descr_t *)jp->entry)->entry);
@@ -431,12 +436,15 @@ void __kprobes jprobe_return_end(void)
 
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
+	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
 	/*
 	 * FIXME - we should ideally be validating that we got here 'cos
 	 * of the "trap" in jprobe_return() above, before restoring the
 	 * saved regs...
 	 */
-	memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));
+	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+	preempt_enable_no_resched();
 	return 1;
 }
 
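A word on the new preempt_enable_no_resched() in the final hunk, inferred from the control flow in this file:

/*
 * setjmp_pre_handler() returns 1, so kprobe_handler() skips the
 * single-step setup and post_kprobe_handler() never runs for a jprobe
 * hit. The preempt_disable() taken at kprobe_handler() entry therefore
 * has to be paired here, in longjmp_break_handler(), once the registers
 * saved in kcb->jprobe_saved_regs have been restored.
 */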
