Diffstat (limited to 'kernel/kprobes.c')
 -rw-r--r--  kernel/kprobes.c | 134
1 files changed, 64 insertions, 70 deletions
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ce4915dd683a..5beda378cc75 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -32,7 +32,6 @@
  * <prasanna@in.ibm.com> added function-return probes.
  */
 #include <linux/kprobes.h>
-#include <linux/spinlock.h>
 #include <linux/hash.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -49,9 +48,9 @@
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
-unsigned int kprobe_cpu = NR_CPUS;
-static DEFINE_SPINLOCK(kprobe_lock);
-static struct kprobe *curr_kprobe;
+static DEFINE_SPINLOCK(kprobe_lock);            /* Protects kprobe_table */
+DEFINE_SPINLOCK(kretprobe_lock);                /* Protects kretprobe_inst_table */
+static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -153,50 +152,31 @@ void __kprobes free_insn_slot(kprobe_opcode_t *slot)
         }
 }
 
-/* Locks kprobe: irqs must be disabled */
-void __kprobes lock_kprobes(void)
+/* We have preemption disabled.. so it is safe to use __ versions */
+static inline void set_kprobe_instance(struct kprobe *kp)
 {
-        unsigned long flags = 0;
-
-        /* Avoiding local interrupts to happen right after we take the kprobe_lock
-         * and before we get a chance to update kprobe_cpu, this to prevent
-         * deadlock when we have a kprobe on ISR routine and a kprobe on task
-         * routine
-         */
-        local_irq_save(flags);
-
-        spin_lock(&kprobe_lock);
-        kprobe_cpu = smp_processor_id();
-
-        local_irq_restore(flags);
+        __get_cpu_var(kprobe_instance) = kp;
 }
 
-void __kprobes unlock_kprobes(void)
+static inline void reset_kprobe_instance(void)
 {
-        unsigned long flags = 0;
-
-        /* Avoiding local interrupts to happen right after we update
-         * kprobe_cpu and before we get a a chance to release kprobe_lock,
-         * this to prevent deadlock when we have a kprobe on ISR routine and
-         * a kprobe on task routine
-         */
-        local_irq_save(flags);
-
-        kprobe_cpu = NR_CPUS;
-        spin_unlock(&kprobe_lock);
-
-        local_irq_restore(flags);
+        __get_cpu_var(kprobe_instance) = NULL;
 }
 
-/* You have to be holding the kprobe_lock */
+/*
+ * This routine is called either:
+ *      - under the kprobe_lock spinlock - during kprobe_[un]register()
+ *                              OR
+ *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
+ */
 struct kprobe __kprobes *get_kprobe(void *addr)
 {
         struct hlist_head *head;
         struct hlist_node *node;
+        struct kprobe *p;
 
         head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
-        hlist_for_each(node, head) {
-                struct kprobe *p = hlist_entry(node, struct kprobe, hlist);
+        hlist_for_each_entry_rcu(p, node, head, hlist) {
                 if (p->addr == addr)
                         return p;
         }
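[Editor's note, not part of the patch: a minimal reader-side sketch of the contract the new comment above spells out. Breakpoint-time lookups rely on disabled preemption rather than kprobe_lock, because get_kprobe() now walks the hash chain with hlist_for_each_entry_rcu() and the per-cpu kprobe_instance replaces the old global curr_kprobe. The function name and arguments below are hypothetical stand-ins for what an arch trap handler has available.]

/* Illustrative only: reader side of the new locking scheme. */
static int example_breakpoint_lookup(void *addr, struct pt_regs *regs)
{
        struct kprobe *p;
        int hit = 0;

        preempt_disable();                      /* what arch handlers already do      */
        p = get_kprobe(addr);                   /* RCU hash-chain walk, no spinlock   */
        if (p && p->pre_handler) {
                set_kprobe_instance(p);         /* per-cpu, was the global curr_kprobe */
                hit = p->pre_handler(p, regs);
                reset_kprobe_instance();
        }
        preempt_enable();
        return hit;
}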
@@ -211,13 +191,13 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
         struct kprobe *kp;
 
-        list_for_each_entry(kp, &p->list, list) {
+        list_for_each_entry_rcu(kp, &p->list, list) {
                 if (kp->pre_handler) {
-                        curr_kprobe = kp;
+                        set_kprobe_instance(kp);
                         if (kp->pre_handler(kp, regs))
                                 return 1;
                 }
-                curr_kprobe = NULL;
+                reset_kprobe_instance();
         }
         return 0;
 }
@@ -227,11 +207,11 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 {
         struct kprobe *kp;
 
-        list_for_each_entry(kp, &p->list, list) {
+        list_for_each_entry_rcu(kp, &p->list, list) {
                 if (kp->post_handler) {
-                        curr_kprobe = kp;
+                        set_kprobe_instance(kp);
                         kp->post_handler(kp, regs, flags);
-                        curr_kprobe = NULL;
+                        reset_kprobe_instance();
                 }
         }
         return;
@@ -240,12 +220,14 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
                                         int trapnr)
 {
+        struct kprobe *cur = __get_cpu_var(kprobe_instance);
+
         /*
          * if we faulted "during" the execution of a user specified
          * probe handler, invoke just that probe's fault handler
          */
-        if (curr_kprobe && curr_kprobe->fault_handler) {
-                if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
+        if (cur && cur->fault_handler) {
+                if (cur->fault_handler(cur, regs, trapnr))
                         return 1;
         }
         return 0;
@@ -253,17 +235,18 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 
 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
-        struct kprobe *kp = curr_kprobe;
-        if (curr_kprobe && kp->break_handler) {
-                if (kp->break_handler(kp, regs)) {
-                        curr_kprobe = NULL;
-                        return 1;
-                }
+        struct kprobe *cur = __get_cpu_var(kprobe_instance);
+        int ret = 0;
+
+        if (cur && cur->break_handler) {
+                if (cur->break_handler(cur, regs))
+                        ret = 1;
         }
-        curr_kprobe = NULL;
-        return 0;
+        reset_kprobe_instance();
+        return ret;
 }
 
+/* Called with kretprobe_lock held */
 struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
 {
         struct hlist_node *node;
@@ -273,6 +256,7 @@ struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
         return NULL;
 }
 
+/* Called with kretprobe_lock held */
 static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
                                                              *rp)
 {
@@ -283,6 +267,7 @@ static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
         return NULL;
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes add_rp_inst(struct kretprobe_instance *ri)
 {
         /*
@@ -301,6 +286,7 @@ void __kprobes add_rp_inst(struct kretprobe_instance *ri)
         hlist_add_head(&ri->uflist, &ri->rp->used_instances);
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
 {
         /* remove rp inst off the rprobe_inst_table */
@@ -334,13 +320,13 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
         struct hlist_node *node, *tmp;
         unsigned long flags = 0;
 
-        spin_lock_irqsave(&kprobe_lock, flags);
+        spin_lock_irqsave(&kretprobe_lock, flags);
         head = kretprobe_inst_table_head(current);
         hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                 if (ri->task == tk)
                         recycle_rp_inst(ri);
         }
-        spin_unlock_irqrestore(&kprobe_lock, flags);
+        spin_unlock_irqrestore(&kretprobe_lock, flags);
 }
 
 /*
@@ -351,9 +337,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                            struct pt_regs *regs)
 {
         struct kretprobe *rp = container_of(p, struct kretprobe, kp);
+        unsigned long flags = 0;
 
         /*TODO: consider to only swap the RA after the last pre_handler fired */
+        spin_lock_irqsave(&kretprobe_lock, flags);
         arch_prepare_kretprobe(rp, regs);
+        spin_unlock_irqrestore(&kretprobe_lock, flags);
         return 0;
 }
 
@@ -384,13 +373,13 @@ static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
         struct kprobe *kp;
 
         if (p->break_handler) {
-                list_for_each_entry(kp, &old_p->list, list) {
+                list_for_each_entry_rcu(kp, &old_p->list, list) {
                         if (kp->break_handler)
                                 return -EEXIST;
                 }
-                list_add_tail(&p->list, &old_p->list);
+                list_add_tail_rcu(&p->list, &old_p->list);
         } else
-                list_add(&p->list, &old_p->list);
+                list_add_rcu(&p->list, &old_p->list);
         return 0;
 }
 
@@ -408,18 +397,18 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
         ap->break_handler = aggr_break_handler;
 
         INIT_LIST_HEAD(&ap->list);
-        list_add(&p->list, &ap->list);
+        list_add_rcu(&p->list, &ap->list);
 
         INIT_HLIST_NODE(&ap->hlist);
-        hlist_del(&p->hlist);
-        hlist_add_head(&ap->hlist,
+        hlist_del_rcu(&p->hlist);
+        hlist_add_head_rcu(&ap->hlist,
                        &kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
 }
 
 /*
  * This is the second or subsequent kprobe at the address - handle
  * the intricacies
- * TODO: Move kcalloc outside the spinlock
+ * TODO: Move kcalloc outside the spin_lock
  */
 static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
                                           struct kprobe *p)
@@ -445,7 +434,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
 {
         arch_disarm_kprobe(p);
-        hlist_del(&p->hlist);
+        hlist_del_rcu(&p->hlist);
         spin_unlock_irqrestore(&kprobe_lock, flags);
         arch_remove_kprobe(p);
 }
@@ -453,11 +442,10 @@ static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
 static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
                 struct kprobe *p, unsigned long flags)
 {
-        list_del(&p->list);
-        if (list_empty(&old_p->list)) {
+        list_del_rcu(&p->list);
+        if (list_empty(&old_p->list))
                 cleanup_kprobe(old_p, flags);
-                kfree(old_p);
-        } else
+        else
                 spin_unlock_irqrestore(&kprobe_lock, flags);
 }
 
@@ -480,9 +468,9 @@ int __kprobes register_kprobe(struct kprobe *p)
         if ((ret = arch_prepare_kprobe(p)) != 0)
                 goto rm_kprobe;
 
+        p->nmissed = 0;
         spin_lock_irqsave(&kprobe_lock, flags);
         old_p = get_kprobe(p->addr);
-        p->nmissed = 0;
         if (old_p) {
                 ret = register_aggr_kprobe(old_p, p);
                 goto out;
@@ -490,7 +478,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 
         arch_copy_kprobe(p);
         INIT_HLIST_NODE(&p->hlist);
-        hlist_add_head(&p->hlist,
+        hlist_add_head_rcu(&p->hlist,
                        &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
         arch_arm_kprobe(p);
@@ -511,10 +499,16 @@ void __kprobes unregister_kprobe(struct kprobe *p)
         spin_lock_irqsave(&kprobe_lock, flags);
         old_p = get_kprobe(p->addr);
         if (old_p) {
+                /* cleanup_*_kprobe() does the spin_unlock_irqrestore */
                 if (old_p->pre_handler == aggr_pre_handler)
                         cleanup_aggr_kprobe(old_p, p, flags);
                 else
                         cleanup_kprobe(p, flags);
+
+                synchronize_sched();
+                if (old_p->pre_handler == aggr_pre_handler &&
+                                list_empty(&old_p->list))
+                        kfree(old_p);
         } else
                 spin_unlock_irqrestore(&kprobe_lock, flags);
 }
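[Editor's note, not part of the patch: the synchronize_sched() added in this hunk is the update-side half of the RCU scheme. A hedged sketch of the sequence unregister_kprobe() now follows, with the aggregate-probe and error paths elided; the unlink and unlock actually happen inside cleanup_*_kprobe(), as the new comment says.]

/* Sketch only: RCU-deferred free of a kprobe, assuming kprobe_lock is held. */
        hlist_del_rcu(&old_p->hlist);           /* unlink; in-flight handlers may
                                                 * still see the old pointer        */
        spin_unlock_irqrestore(&kprobe_lock, flags);

        synchronize_sched();                    /* wait for every preempt-disabled
                                                 * section, i.e. every running
                                                 * kprobe handler, to finish        */
        kfree(old_p);                           /* no CPU can reference it any more */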
@@ -591,13 +585,13 @@ void __kprobes unregister_kretprobe(struct kretprobe *rp)
 
         unregister_kprobe(&rp->kp);
         /* No race here */
-        spin_lock_irqsave(&kprobe_lock, flags);
+        spin_lock_irqsave(&kretprobe_lock, flags);
         free_rp_inst(rp);
         while ((ri = get_used_rp_inst(rp)) != NULL) {
                 ri->rp = NULL;
                 hlist_del(&ri->uflist);
         }
-        spin_unlock_irqrestore(&kprobe_lock, flags);
+        spin_unlock_irqrestore(&kretprobe_lock, flags);
 }
 
 static int __init init_kprobes(void)
