author		Ananth N Mavinakayanahalli <ananth@in.ibm.com>	2005-11-07 04:00:13 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-11-07 10:53:46 -0500
commit		3516a46042508a495fac13c2e73530d936ebe015 (patch)
tree		2d03117832b5c2439987de73420a38fc3e5983e4
parent		e7a510f92c1e482a7db05afd3cb84af1f4cfe0bc (diff)
[PATCH] Kprobes: Use RCU for (un)register synchronization - base changes
Changes to the base kprobes infrastructure to use RCU for synchronization
during kprobe registration and unregistration.  These changes, coupled with
the arch kprobe changes (next in series):

a. serialize registration and unregistration of kprobes.
b. enable lockless execution of handlers.  Handlers can now run in parallel.

Signed-off-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
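In outline, the patch replaces the global lock_kprobes()/unlock_kprobes()
exclusion with the classic RCU split: the breakpoint fast path walks the
probe hash table locklessly, writers serialize on kprobe_lock and use the
_rcu list primitives, and frees are deferred until all in-flight handlers
have finished.  A minimal, self-contained sketch of that pattern follows
(probe_lookup/probe_publish/probe_retract are hypothetical helper names for
illustration; kprobe_lock, kprobe_table, and the _rcu calls are the ones the
diff below introduces):

#include <linux/kprobes.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hash.h>

#define KPROBE_HASH_BITS	6	/* as in kernel/kprobes.c */
#define KPROBE_TABLE_SIZE	(1 << KPROBE_HASH_BITS)

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static DEFINE_SPINLOCK(kprobe_lock);	/* serializes writers only */

/* Read side: runs in the trap handler with preemption disabled, which
 * synchronize_sched() treats as an RCU read-side critical section. */
static struct kprobe *probe_lookup(void *addr)
{
	struct kprobe *p;
	struct hlist_node *node;
	struct hlist_head *head;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist)
		if (p->addr == addr)
			return p;
	return NULL;
}

/* Update side: publish under the lock with the _rcu list variant... */
static void probe_publish(struct kprobe *p)
{
	unsigned long flags;

	spin_lock_irqsave(&kprobe_lock, flags);
	hlist_add_head_rcu(&p->hlist,
		&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
	spin_unlock_irqrestore(&kprobe_lock, flags);
}

/* ...and on removal, wait for every CPU to pass a quiescent state
 * (i.e. for all currently running handlers to return) before freeing. */
static void probe_retract(struct kprobe *p)
{
	unsigned long flags;

	spin_lock_irqsave(&kprobe_lock, flags);
	hlist_del_rcu(&p->hlist);
	spin_unlock_irqrestore(&kprobe_lock, flags);
	synchronize_sched();
	/* now no handler can still hold a reference to p */
}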
-rw-r--r--	include/linux/kprobes.h	  9
-rw-r--r--	kernel/kprobes.c	103
2 files changed, 46 insertions(+), 66 deletions(-)
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 6720305a31e8..cff281cf70cf 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -34,6 +34,8 @@
 #include <linux/notifier.h>
 #include <linux/smp.h>
 #include <linux/percpu.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
 
 #include <asm/kprobes.h>
 
@@ -146,10 +148,7 @@ struct kretprobe_instance {
 };
 
 #ifdef CONFIG_KPROBES
-/* Locks kprobe: irq must be disabled */
-void lock_kprobes(void);
-void unlock_kprobes(void);
-
+extern spinlock_t kretprobe_lock;
 extern int arch_prepare_kprobe(struct kprobe *p);
 extern void arch_copy_kprobe(struct kprobe *p);
 extern void arch_arm_kprobe(struct kprobe *p);
@@ -160,7 +159,7 @@ extern void show_registers(struct pt_regs *regs);
 extern kprobe_opcode_t *get_insn_slot(void);
 extern void free_insn_slot(kprobe_opcode_t *slot);
 
-/* Get the kprobe at this addr (if any). Must have called lock_kprobes */
+/* Get the kprobe at this addr (if any) - called under a rcu_read_lock() */
 struct kprobe *get_kprobe(void *addr);
 struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
 
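Before the kernel/kprobes.c half of the diff, note what the header change
amounts to: get_kprobe()'s contract moves from "caller must hold
lock_kprobes()" to "caller holds kprobe_lock or is inside an RCU read-side
section", and kretprobe_lock is exported because the arch kretprobe code
(converted later in this series) must take it around instance bookkeeping.
An arch-side lookup would now be shaped roughly like this (hypothetical
fragment, not code from the patch):

/* Sketch of a post-patch arch breakpoint handler doing the lockless
 * lookup.  Preemption is disabled in the trap context, and a
 * disabled-preemption region is exactly what synchronize_sched() on
 * the unregister side waits out. */
static int sketch_trap_lookup(void *trap_addr, struct pt_regs *regs)
{
	struct kprobe *p;

	preempt_disable();			/* RCU read side */
	p = get_kprobe(trap_addr);
	if (!p) {
		preempt_enable_no_resched();
		return 0;			/* not our trap */
	}
	if (p->pre_handler && p->pre_handler(p, regs)) {
		/* handler claimed the trap; arch code single-steps next,
		 * keeping preemption disabled until the post handler */
		return 1;
	}
	preempt_enable_no_resched();
	return 1;
}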
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 6da8f9b33d1e..cfef426e4cdc 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -32,7 +32,6 @@
  * <prasanna@in.ibm.com> added function-return probes.
  */
 #include <linux/kprobes.h>
-#include <linux/spinlock.h>
 #include <linux/hash.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -49,8 +48,8 @@
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
-unsigned int kprobe_cpu = NR_CPUS;
-static DEFINE_SPINLOCK(kprobe_lock);
+static DEFINE_SPINLOCK(kprobe_lock);	/* Protects kprobe_table */
+DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
 /*
@@ -153,41 +152,6 @@ void __kprobes free_insn_slot(kprobe_opcode_t *slot)
 	}
 }
 
-/* Locks kprobe: irqs must be disabled */
-void __kprobes lock_kprobes(void)
-{
-	unsigned long flags = 0;
-
-	/* Avoiding local interrupts to happen right after we take the kprobe_lock
-	 * and before we get a chance to update kprobe_cpu, this to prevent
-	 * deadlock when we have a kprobe on ISR routine and a kprobe on task
-	 * routine
-	 */
-	local_irq_save(flags);
-
-	spin_lock(&kprobe_lock);
-	kprobe_cpu = smp_processor_id();
-
-	local_irq_restore(flags);
-}
-
-void __kprobes unlock_kprobes(void)
-{
-	unsigned long flags = 0;
-
-	/* Avoiding local interrupts to happen right after we update
-	 * kprobe_cpu and before we get a a chance to release kprobe_lock,
-	 * this to prevent deadlock when we have a kprobe on ISR routine and
-	 * a kprobe on task routine
-	 */
-	local_irq_save(flags);
-
-	kprobe_cpu = NR_CPUS;
-	spin_unlock(&kprobe_lock);
-
-	local_irq_restore(flags);
-}
-
 /* We have preemption disabled.. so it is safe to use __ versions */
 static inline void set_kprobe_instance(struct kprobe *kp)
 {
@@ -199,15 +163,20 @@ static inline void reset_kprobe_instance(void)
 	__get_cpu_var(kprobe_instance) = NULL;
 }
 
-/* You have to be holding the kprobe_lock */
+/*
+ * This routine is called either:
+ *	- under the kprobe_lock spinlock - during kprobe_[un]register()
+ *				OR
+ *	- under an rcu_read_lock() - from arch/xxx/kernel/kprobes.c
+ */
 struct kprobe __kprobes *get_kprobe(void *addr)
 {
 	struct hlist_head *head;
 	struct hlist_node *node;
+	struct kprobe *p;
 
 	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
-	hlist_for_each(node, head) {
-		struct kprobe *p = hlist_entry(node, struct kprobe, hlist);
+	hlist_for_each_entry_rcu(p, node, head, hlist) {
 		if (p->addr == addr)
 			return p;
 	}
@@ -222,7 +191,7 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe *kp;
 
-	list_for_each_entry(kp, &p->list, list) {
+	list_for_each_entry_rcu(kp, &p->list, list) {
 		if (kp->pre_handler) {
 			set_kprobe_instance(kp);
 			if (kp->pre_handler(kp, regs))
@@ -238,7 +207,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 {
 	struct kprobe *kp;
 
-	list_for_each_entry(kp, &p->list, list) {
+	list_for_each_entry_rcu(kp, &p->list, list) {
 		if (kp->post_handler) {
 			set_kprobe_instance(kp);
 			kp->post_handler(kp, regs, flags);
@@ -277,6 +246,7 @@ static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 	return ret;
 }
 
+/* Called with kretprobe_lock held */
 struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
 {
 	struct hlist_node *node;
@@ -286,6 +256,7 @@ struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
 	return NULL;
 }
 
+/* Called with kretprobe_lock held */
 static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
 		*rp)
 {
@@ -296,6 +267,7 @@ static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
 	return NULL;
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes add_rp_inst(struct kretprobe_instance *ri)
 {
 	/*
@@ -314,6 +286,7 @@ void __kprobes add_rp_inst(struct kretprobe_instance *ri)
 	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
 {
 	/* remove rp inst off the rprobe_inst_table */
@@ -347,13 +320,13 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 	struct hlist_node *node, *tmp;
 	unsigned long flags = 0;
 
-	spin_lock_irqsave(&kprobe_lock, flags);
+	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(current);
 	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
 		if (ri->task == tk)
 			recycle_rp_inst(ri);
 	}
-	spin_unlock_irqrestore(&kprobe_lock, flags);
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
 }
 
 /*
@@ -364,9 +337,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 					   struct pt_regs *regs)
 {
 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
+	unsigned long flags = 0;
 
 	/*TODO: consider to only swap the RA after the last pre_handler fired */
+	spin_lock_irqsave(&kretprobe_lock, flags);
 	arch_prepare_kretprobe(rp, regs);
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
 	return 0;
 }
 
@@ -397,13 +373,13 @@ static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
 	struct kprobe *kp;
 
 	if (p->break_handler) {
-		list_for_each_entry(kp, &old_p->list, list) {
+		list_for_each_entry_rcu(kp, &old_p->list, list) {
 			if (kp->break_handler)
 				return -EEXIST;
 		}
-		list_add_tail(&p->list, &old_p->list);
+		list_add_tail_rcu(&p->list, &old_p->list);
 	} else
-		list_add(&p->list, &old_p->list);
+		list_add_rcu(&p->list, &old_p->list);
 	return 0;
 }
 
@@ -421,18 +397,18 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 	ap->break_handler = aggr_break_handler;
 
 	INIT_LIST_HEAD(&ap->list);
-	list_add(&p->list, &ap->list);
+	list_add_rcu(&p->list, &ap->list);
 
 	INIT_HLIST_NODE(&ap->hlist);
-	hlist_del(&p->hlist);
-	hlist_add_head(&ap->hlist,
+	hlist_del_rcu(&p->hlist);
+	hlist_add_head_rcu(&ap->hlist,
 		       &kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
 }
 
 /*
  * This is the second or subsequent kprobe at the address - handle
  * the intricacies
- * TODO: Move kcalloc outside the spinlock
+ * TODO: Move kcalloc outside the spin_lock
  */
 static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 					  struct kprobe *p)
@@ -458,7 +434,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
 {
 	arch_disarm_kprobe(p);
-	hlist_del(&p->hlist);
+	hlist_del_rcu(&p->hlist);
 	spin_unlock_irqrestore(&kprobe_lock, flags);
 	arch_remove_kprobe(p);
 }
@@ -466,11 +442,10 @@ static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
 static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
 		struct kprobe *p, unsigned long flags)
 {
-	list_del(&p->list);
-	if (list_empty(&old_p->list)) {
+	list_del_rcu(&p->list);
+	if (list_empty(&old_p->list))
 		cleanup_kprobe(old_p, flags);
-		kfree(old_p);
-	} else
+	else
 		spin_unlock_irqrestore(&kprobe_lock, flags);
 }
 
@@ -493,9 +468,9 @@ int __kprobes register_kprobe(struct kprobe *p)
 	if ((ret = arch_prepare_kprobe(p)) != 0)
 		goto rm_kprobe;
 
+	p->nmissed = 0;
 	spin_lock_irqsave(&kprobe_lock, flags);
 	old_p = get_kprobe(p->addr);
-	p->nmissed = 0;
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
 		goto out;
@@ -503,7 +478,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 
 	arch_copy_kprobe(p);
 	INIT_HLIST_NODE(&p->hlist);
-	hlist_add_head(&p->hlist,
+	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
 	arch_arm_kprobe(p);
@@ -524,10 +499,16 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 	spin_lock_irqsave(&kprobe_lock, flags);
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
+		/* cleanup_*_kprobe() does the spin_unlock_irqrestore */
 		if (old_p->pre_handler == aggr_pre_handler)
 			cleanup_aggr_kprobe(old_p, p, flags);
 		else
 			cleanup_kprobe(p, flags);
+
+		synchronize_sched();
+		if (old_p->pre_handler == aggr_pre_handler &&
+				list_empty(&old_p->list))
+			kfree(old_p);
 	} else
 		spin_unlock_irqrestore(&kprobe_lock, flags);
 }
@@ -604,13 +585,13 @@ void __kprobes unregister_kretprobe(struct kretprobe *rp)
 
 	unregister_kprobe(&rp->kp);
 	/* No race here */
-	spin_lock_irqsave(&kprobe_lock, flags);
+	spin_lock_irqsave(&kretprobe_lock, flags);
 	free_rp_inst(rp);
 	while ((ri = get_used_rp_inst(rp)) != NULL) {
 		ri->rp = NULL;
 		hlist_del(&ri->uflist);
 	}
-	spin_unlock_irqrestore(&kprobe_lock, flags);
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
 }
 
 static int __init init_kprobes(void)
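A closing observation on the unregister path above: the kfree() of an
aggregate probe is deliberately deferred until after synchronize_sched(),
because a handler on another CPU may still be iterating old_p->list inside
aggr_pre_handler().  The same guarantee flows out to users: once
unregister_kprobe() returns, no handler can still be running, so storage
that embeds the kprobe may be freed immediately.  A hedged sketch of that
user-side consequence (struct my_data and my_cleanup are illustrative
names, not kernel API):

#include <linux/kprobes.h>
#include <linux/slab.h>

struct my_data {
	struct kprobe probe;	/* embedded probe */
	unsigned long hits;	/* private bookkeeping */
};

static void my_cleanup(struct my_data *d)
{
	/* unregister_kprobe() now does hlist_del_rcu() and then
	 * synchronize_sched(), so it returns only after every
	 * in-flight handler has finished using d->probe. */
	unregister_kprobe(&d->probe);
	kfree(d);		/* safe immediately */
}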