author		Prasanna S Panchamukhi <prasanna@in.ibm.com>	2005-09-06 18:19:26 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-09-07 19:57:59 -0400
commit		d0aaff9796c3310326d10da44fc0faed352a1d29
tree		591fd8dedf34464989d23bbb0e66a1ccb2fa18a6
parent		505db03639db34ca2c64fe7ee27190d324281f2c
[PATCH] Kprobes: prevent possible race conditions generic
There are possible race conditions if probes are placed on routines within the kprobes files and on routines used by kprobes. For example, if a probe is placed on get_kprobe(), the system can hang while inserting a probe on any other routine such as do_fork(): while inserting the probe on do_fork(), register_kprobe() grabs the kprobes spinlock and calls get_kprobe(); to handle the probe on get_kprobe(), kprobe_handler() is then invoked and tries to grab the kprobes spinlock again, and spins forever. This patch avoids such race conditions by preventing probes on routines within the kprobes file and on routines used by kprobes.

As per Andi Kleen's suggestion, the kprobes routines and the other routines used by kprobes are moved to a separate section, .kprobes.text. The page fault, exception, and general protection fault handlers are moved into .kprobes.text as well.

These patches have been tested on the i386, x86_64 and ppc64 architectures, and compiled on ia64 and sparc64.

Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
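The mechanism behind the fix: functions annotated __kprobes are collected into a dedicated .kprobes.text section, and register_kprobe() refuses any address that falls between that section's start and end symbols. The annotation and the section bounds come from the header and linker-script parts of the series, not from this kernel/-only diff; roughly, as a sketch, they look like this:

	/* e.g. include/linux/kprobes.h: place the function in .kprobes.text */
	#define __kprobes	__attribute__((__section__(".kprobes.text")))

	/* include/asm-generic/sections.h: bounds provided by the linker script */
	extern char __kprobes_text_start[], __kprobes_text_end[];

	/* arch vmlinux.lds.S: keep all __kprobes-marked code in one contiguous range */
	__kprobes_text_start = .;
	*(.kprobes.text)
	__kprobes_text_end = .;

With these in place, in_kprobes_functions() below is a simple range check, and any attempt to probe kprobes' own infrastructure fails with -EINVAL at registration time instead of deadlocking in the breakpoint handler.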
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/kprobes.c | 72
1 file changed, 43 insertions(+), 29 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index b0237122b24e..3b7653f2e7ae 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -37,6 +37,7 @@
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/moduleloader.h>
+#include <asm-generic/sections.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
 #include <asm/kdebug.h>
@@ -72,7 +73,7 @@ static struct hlist_head kprobe_insn_pages;
  * get_insn_slot() - Find a slot on an executable page for an instruction.
  * We allocate an executable page if there's no room on existing ones.
  */
-kprobe_opcode_t *get_insn_slot(void)
+kprobe_opcode_t __kprobes *get_insn_slot(void)
 {
 	struct kprobe_insn_page *kip;
 	struct hlist_node *pos;
@@ -117,7 +118,7 @@ kprobe_opcode_t *get_insn_slot(void)
 	return kip->insns;
 }
 
-void free_insn_slot(kprobe_opcode_t *slot)
+void __kprobes free_insn_slot(kprobe_opcode_t *slot)
 {
 	struct kprobe_insn_page *kip;
 	struct hlist_node *pos;
@@ -152,20 +153,20 @@ void free_insn_slot(kprobe_opcode_t *slot)
 }
 
 /* Locks kprobe: irqs must be disabled */
-void lock_kprobes(void)
+void __kprobes lock_kprobes(void)
 {
 	spin_lock(&kprobe_lock);
 	kprobe_cpu = smp_processor_id();
 }
 
-void unlock_kprobes(void)
+void __kprobes unlock_kprobes(void)
 {
 	kprobe_cpu = NR_CPUS;
 	spin_unlock(&kprobe_lock);
 }
 
 /* You have to be holding the kprobe_lock */
-struct kprobe *get_kprobe(void *addr)
+struct kprobe __kprobes *get_kprobe(void *addr)
 {
 	struct hlist_head *head;
 	struct hlist_node *node;
@@ -183,7 +184,7 @@ struct kprobe *get_kprobe(void *addr)
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
  */
-static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
+static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe *kp;
 
@@ -198,8 +199,8 @@ static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	return 0;
 }
 
-static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
-			      unsigned long flags)
+static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
+					unsigned long flags)
 {
 	struct kprobe *kp;
 
@@ -213,8 +214,8 @@ static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 	return;
 }
 
-static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
-			      int trapnr)
+static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
+					int trapnr)
 {
 	/*
 	 * if we faulted "during" the execution of a user specified
@@ -227,7 +228,7 @@ static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 	return 0;
 }
 
-static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
+static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe *kp = curr_kprobe;
 	if (curr_kprobe && kp->break_handler) {
@@ -240,7 +241,7 @@ static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 	return 0;
 }
 
-struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
+struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
 {
 	struct hlist_node *node;
 	struct kretprobe_instance *ri;
@@ -249,7 +250,8 @@ struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
 	return NULL;
 }
 
-static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
+static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
+							      *rp)
 {
 	struct hlist_node *node;
 	struct kretprobe_instance *ri;
@@ -258,7 +260,7 @@ static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
 	return NULL;
 }
 
-void add_rp_inst(struct kretprobe_instance *ri)
+void __kprobes add_rp_inst(struct kretprobe_instance *ri)
 {
 	/*
 	 * Remove rp inst off the free list -
@@ -276,7 +278,7 @@ void add_rp_inst(struct kretprobe_instance *ri)
 	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
 }
 
-void recycle_rp_inst(struct kretprobe_instance *ri)
+void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
 {
 	/* remove rp inst off the rprobe_inst_table */
 	hlist_del(&ri->hlist);
@@ -291,7 +293,7 @@ void recycle_rp_inst(struct kretprobe_instance *ri)
 	kfree(ri);
 }
 
-struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk)
+struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
 {
 	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
 }
@@ -302,7 +304,7 @@ struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk)
  * instances associated with this task. These left over instances represent
  * probed functions that have been called but will never return.
  */
-void kprobe_flush_task(struct task_struct *tk)
+void __kprobes kprobe_flush_task(struct task_struct *tk)
 {
 	struct kretprobe_instance *ri;
 	struct hlist_head *head;
@@ -322,7 +324,8 @@ void kprobe_flush_task(struct task_struct *tk)
  * This kprobe pre_handler is registered with every kretprobe. When probe
  * hits it will set up the return probe.
  */
-static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
+static int __kprobes pre_handler_kretprobe(struct kprobe *p,
+					   struct pt_regs *regs)
 {
 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
 
@@ -353,7 +356,7 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
-static int add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
+static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
 {
 	struct kprobe *kp;
 
@@ -395,7 +398,8 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
  * the intricacies
  * TODO: Move kcalloc outside the spinlock
  */
-static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
+static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
+					  struct kprobe *p)
 {
 	int ret = 0;
 	struct kprobe *ap;
@@ -434,15 +438,25 @@ static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
 	spin_unlock_irqrestore(&kprobe_lock, flags);
 }
 
-int register_kprobe(struct kprobe *p)
+static int __kprobes in_kprobes_functions(unsigned long addr)
+{
+	if (addr >= (unsigned long)__kprobes_text_start
+		&& addr < (unsigned long)__kprobes_text_end)
+		return -EINVAL;
+	return 0;
+}
+
+int __kprobes register_kprobe(struct kprobe *p)
 {
 	int ret = 0;
 	unsigned long flags = 0;
 	struct kprobe *old_p;
 
-	if ((ret = arch_prepare_kprobe(p)) != 0) {
+	if ((ret = in_kprobes_functions((unsigned long) p->addr)) != 0)
+		return ret;
+	if ((ret = arch_prepare_kprobe(p)) != 0)
 		goto rm_kprobe;
-	}
+
 	spin_lock_irqsave(&kprobe_lock, flags);
 	old_p = get_kprobe(p->addr);
 	p->nmissed = 0;
@@ -466,7 +480,7 @@ rm_kprobe:
 	return ret;
 }
 
-void unregister_kprobe(struct kprobe *p)
+void __kprobes unregister_kprobe(struct kprobe *p)
 {
 	unsigned long flags;
 	struct kprobe *old_p;
@@ -487,7 +501,7 @@ static struct notifier_block kprobe_exceptions_nb = {
 	.priority = 0x7fffffff /* we need to notified first */
 };
 
-int register_jprobe(struct jprobe *jp)
+int __kprobes register_jprobe(struct jprobe *jp)
 {
 	/* Todo: Verify probepoint is a function entry point */
 	jp->kp.pre_handler = setjmp_pre_handler;
@@ -496,14 +510,14 @@ int register_jprobe(struct jprobe *jp)
 	return register_kprobe(&jp->kp);
 }
 
-void unregister_jprobe(struct jprobe *jp)
+void __kprobes unregister_jprobe(struct jprobe *jp)
 {
 	unregister_kprobe(&jp->kp);
 }
 
 #ifdef ARCH_SUPPORTS_KRETPROBES
 
-int register_kretprobe(struct kretprobe *rp)
+int __kprobes register_kretprobe(struct kretprobe *rp)
 {
 	int ret = 0;
 	struct kretprobe_instance *inst;
@@ -540,14 +554,14 @@ int register_kretprobe(struct kretprobe *rp)
 
 #else /* ARCH_SUPPORTS_KRETPROBES */
 
-int register_kretprobe(struct kretprobe *rp)
+int __kprobes register_kretprobe(struct kretprobe *rp)
 {
 	return -ENOSYS;
 }
 
 #endif /* ARCH_SUPPORTS_KRETPROBES */
 
-void unregister_kretprobe(struct kretprobe *rp)
+void __kprobes unregister_kretprobe(struct kretprobe *rp)
 {
 	unsigned long flags;
 	struct kretprobe_instance *ri;
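
With the range check in place, asking to probe anything that now lives in .kprobes.text is refused up front with -EINVAL instead of arming a breakpoint that can later deadlock the box. A hypothetical test module, sketched here purely for illustration (it is not part of the patch), would observe the new behaviour like this:

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/kprobes.h>

	/* never runs: registration of this probe is refused */
	static int dummy_pre(struct kprobe *p, struct pt_regs *regs)
	{
		return 0;
	}

	static struct kprobe kp = {
		/* register_kprobe() itself is now marked __kprobes, so this
		 * address falls inside .kprobes.text and must be rejected */
		.addr = (kprobe_opcode_t *)register_kprobe,
		.pre_handler = dummy_pre,
	};

	static int __init kprobes_text_check_init(void)
	{
		int ret = register_kprobe(&kp);

		/* expected: -EINVAL from in_kprobes_functions() */
		printk(KERN_INFO "probing register_kprobe: %d\n", ret);
		return ret;
	}
	module_init(kprobes_text_check_init);
	MODULE_LICENSE("GPL");

The changelog's hang needed two steps: a probe on get_kprobe() followed by a probe on any other routine, at which point the second register_kprobe() call trapped inside get_kprobe() while holding kprobe_lock. Refusing the first registration closes that whole class of deadlocks.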