 arch/i386/kernel/kprobes.c | 102
 arch/i386/kernel/process.c |  15
 include/asm-i386/kprobes.h |   3
 include/linux/kprobes.h    |  90
 kernel/kprobes.c           | 213
 5 files changed, 415 insertions, 8 deletions
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 59ff9b455069..048f754bbe23 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -23,6 +23,9 @@
  * Rusty Russell).
  * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
  * interface to access function arguments.
+ * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
+ * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+ * <prasanna@in.ibm.com> added function-return probes.
  */
 
 #include <linux/config.h>
@@ -91,6 +94,53 @@ static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
 	regs->eip = (unsigned long)&p->ainsn.insn;
 }
 
+struct task_struct *arch_get_kprobe_task(void *ptr)
+{
+	return ((struct thread_info *) (((unsigned long) ptr) &
+					(~(THREAD_SIZE - 1))))->task;
+}
+
+void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs)
+{
+	unsigned long *sara = (unsigned long *)&regs->esp;
+	struct kretprobe_instance *ri;
+	static void *orig_ret_addr;
+
+	/*
+	 * Save the return address when the return probe hits
+	 * the first time, and use it to populate the (kretprobe
+	 * instance)->ret_addr for subsequent return probes at
+	 * the same address, since the stack slot would hold
+	 * the kretprobe_trampoline by then.
+	 */
+	if (((void *) *sara) != kretprobe_trampoline)
+		orig_ret_addr = (void *) *sara;
+
+	if ((ri = get_free_rp_inst(rp)) != NULL) {
+		ri->rp = rp;
+		ri->stack_addr = sara;
+		ri->ret_addr = orig_ret_addr;
+		add_rp_inst(ri);
+		/* Replace the return addr with trampoline addr */
+		*sara = (unsigned long) &kretprobe_trampoline;
+	} else {
+		rp->nmissed++;
+	}
+}
+
+void arch_kprobe_flush_task(struct task_struct *tk, spinlock_t *kp_lock)
+{
+	unsigned long flags = 0;
+	struct kretprobe_instance *ri;
+	spin_lock_irqsave(kp_lock, flags);
+	while ((ri = get_rp_inst_tsk(tk)) != NULL) {
+		*((unsigned long *)(ri->stack_addr)) =
+					(unsigned long) ri->ret_addr;
+		recycle_rp_inst(ri);
+	}
+	spin_unlock_irqrestore(kp_lock, flags);
+}
+
 /*
  * Interrupts are disabled on entry as trap3 is an interrupt gate and they
  * remain disabled thorough out this function.
@@ -184,6 +234,55 @@
 }
 
 /*
+ * For function-return probes, init_kprobes() establishes a probepoint
+ * here. When a retprobed function returns, this probe is hit and
+ * trampoline_probe_handler() runs, calling the kretprobe's handler.
+ */
+void kretprobe_trampoline_holder(void)
+{
+	asm volatile (  ".global kretprobe_trampoline\n"
+			"kretprobe_trampoline: \n"
+			"nop\n");
+}
+
+/*
+ * Called when we hit the probe point at kretprobe_trampoline
+ */
+int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct task_struct *tsk;
+	struct kretprobe_instance *ri;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	unsigned long *sara = ((unsigned long *) &regs->esp) - 1;
+
+	tsk = arch_get_kprobe_task(sara);
+	head = kretprobe_inst_table_head(tsk);
+
+	hlist_for_each_entry(ri, node, head, hlist) {
+		if (ri->stack_addr == sara && ri->rp) {
+			if (ri->rp->handler)
+				ri->rp->handler(ri, regs);
+		}
+	}
+	return 0;
+}
+
+void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
+						unsigned long flags)
+{
+	struct kretprobe_instance *ri;
+	/* RA already popped */
+	unsigned long *sara = ((unsigned long *)&regs->esp) - 1;
+
+	while ((ri = get_rp_inst(sara))) {
+		regs->eip = (unsigned long)ri->ret_addr;
+		recycle_rp_inst(ri);
+	}
+	regs->eflags &= ~TF_MASK;
+}
+
+/*
  * Called after single-stepping. p->addr is the address of the
  * instruction whose first byte has been replaced by the "int 3"
  * instruction. To avoid the SMP problems that can occur when we
@@ -266,7 +365,8 @@ static inline int post_kprobe_handler(struct pt_regs *regs)
 	if (current_kprobe->post_handler)
 		current_kprobe->post_handler(current_kprobe, regs, 0);
 
-	resume_execution(current_kprobe, regs);
+	if (current_kprobe->post_handler != trampoline_post_handler)
+		resume_execution(current_kprobe, regs);
 	regs->eflags |= kprobe_saved_eflags;
 
 	unlock_kprobes();
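The arch code above is the heart of the mechanism: arch_prepare_kretprobe() stashes the real return address in a kretprobe_instance and overwrites the saved return slot with the address of kretprobe_trampoline, so the probed function "returns" into the trampoline; trampoline_probe_handler() then runs the user's handler and trampoline_post_handler() restores %eip to the original return address. For illustration only, here is a minimal user-space C sketch of that bookkeeping, with a plain array standing in for the hlists (all names in the sketch are illustrative, not kernel API):

#include <stdio.h>

#define MAXACTIVE 4

struct inst {				/* stand-in for kretprobe_instance */
	unsigned long *stack_slot;	/* where the return address lives */
	unsigned long saved_ra;		/* the original return address */
	int in_use;
};

static struct inst pool[MAXACTIVE];
static unsigned long trampoline = 0xdeadbeef;	/* stand-in trampoline address */

/* "Function entry": hijack the on-stack return address. */
static int prepare(unsigned long *stack_slot)
{
	int i;

	for (i = 0; i < MAXACTIVE; i++) {
		if (!pool[i].in_use) {
			pool[i].stack_slot = stack_slot;
			pool[i].saved_ra = *stack_slot;
			pool[i].in_use = 1;
			*stack_slot = trampoline;
			return 0;
		}
	}
	return -1;	/* no free instance: the kernel bumps rp->nmissed */
}

/* "Trampoline hit" or task exit: put the real return address back. */
static void restore(unsigned long *stack_slot)
{
	int i;

	for (i = 0; i < MAXACTIVE; i++) {
		if (pool[i].in_use && pool[i].stack_slot == stack_slot) {
			*stack_slot = pool[i].saved_ra;
			pool[i].in_use = 0;
		}
	}
}

int main(void)
{
	unsigned long fake_ra_slot = 0x1234;	/* pretend return address */

	prepare(&fake_ra_slot);
	printf("after prepare: %#lx (trampoline)\n", fake_ra_slot);
	restore(&fake_ra_slot);
	printf("after restore: %#lx (original)\n", fake_ra_slot);
	return 0;
}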
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index be3efba7caf7..aea2ce1145df 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -37,6 +37,7 @@
 #include <linux/kallsyms.h>
 #include <linux/ptrace.h>
 #include <linux/random.h>
+#include <linux/kprobes.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -339,6 +340,13 @@ void exit_thread(void)
 	struct task_struct *tsk = current;
 	struct thread_struct *t = &tsk->thread;
 
+	/*
+	 * Remove function-return probe instances associated with this task
+	 * and put them back on the free list. Do not insert an exit probe for
+	 * this function, it will be disabled by kprobe_flush_task if you do.
+	 */
+	kprobe_flush_task(tsk);
+
 	/* The process may have allocated an io port bitmap... nuke it. */
 	if (unlikely(NULL != t->io_bitmap_ptr)) {
 		int cpu = get_cpu();
@@ -362,6 +370,13 @@ void flush_thread(void)
 {
 	struct task_struct *tsk = current;
 
+	/*
+	 * Remove function-return probe instances associated with this task
+	 * and put them back on the free list. Do not insert an exit probe for
+	 * this function, it will be disabled by kprobe_flush_task if you do.
+	 */
+	kprobe_flush_task(tsk);
+
 	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
 	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
 	/*
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h
index 4092f68d123a..8b6d3a90cd78 100644
--- a/include/asm-i386/kprobes.h
+++ b/include/asm-i386/kprobes.h
@@ -39,6 +39,9 @@ typedef u8 kprobe_opcode_t;
 	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
 
 #define JPROBE_ENTRY(pentry) (kprobe_opcode_t *)pentry
+#define ARCH_SUPPORTS_KRETPROBES
+
+void kretprobe_trampoline(void);
 
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 99ddba5a4e00..fba39f87efec 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -25,21 +25,31 @@
  * Rusty Russell).
  * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
  * interface to access function arguments.
+ * 2005-May Hien Nguyen <hien@us.ibm.com> and Jim Keniston
+ * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+ * <prasanna@in.ibm.com> added function-return probes.
  */
 #include <linux/config.h>
 #include <linux/list.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
+#include <linux/spinlock.h>
+
 #include <asm/kprobes.h>
 
 struct kprobe;
 struct pt_regs;
+struct kretprobe;
+struct kretprobe_instance;
 typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
 typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *);
 typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
 				       unsigned long flags);
 typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
 				       int trapnr);
+typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
+				    struct pt_regs *);
+
 struct kprobe {
 	struct hlist_node hlist;
 
@@ -85,6 +95,62 @@ struct jprobe {
 	kprobe_opcode_t *entry; /* probe handling code to jump to */
 };
 
+#ifdef ARCH_SUPPORTS_KRETPROBES
+extern int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs);
+extern void trampoline_post_handler(struct kprobe *p, struct pt_regs *regs,
+				    unsigned long flags);
+extern struct task_struct *arch_get_kprobe_task(void *ptr);
+extern void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs);
+extern void arch_kprobe_flush_task(struct task_struct *tk, spinlock_t *kp_lock);
+#else /* ARCH_SUPPORTS_KRETPROBES */
+static inline void kretprobe_trampoline(void)
+{
+}
+static inline int trampoline_probe_handler(struct kprobe *p,
+						struct pt_regs *regs)
+{
+	return 0;
+}
+static inline void trampoline_post_handler(struct kprobe *p,
+				struct pt_regs *regs, unsigned long flags)
+{
+}
+static inline void arch_prepare_kretprobe(struct kretprobe *rp,
+						struct pt_regs *regs)
+{
+}
+static inline void arch_kprobe_flush_task(struct task_struct *tk)
+{
+}
+#define arch_get_kprobe_task(ptr) ((struct task_struct *)NULL)
+#endif /* ARCH_SUPPORTS_KRETPROBES */
+/*
+ * Function-return probe -
+ * Note:
+ * User needs to provide a handler function, and initialize maxactive.
+ * maxactive - The maximum number of instances of the probed function that
+ * can be active concurrently.
+ * nmissed - tracks the number of times the probed function's return was
+ * ignored, due to maxactive being too low.
+ *
+ */
+struct kretprobe {
+	struct kprobe kp;
+	kretprobe_handler_t handler;
+	int maxactive;
+	int nmissed;
+	struct hlist_head free_instances;
+	struct hlist_head used_instances;
+};
+
+struct kretprobe_instance {
+	struct hlist_node uflist; /* either on free list or used list */
+	struct hlist_node hlist;
+	struct kretprobe *rp;
+	void *ret_addr;
+	void *stack_addr;
+};
+
 #ifdef CONFIG_KPROBES
 /* Locks kprobe: irq must be disabled */
 void lock_kprobes(void);
@@ -104,6 +170,7 @@ extern void show_registers(struct pt_regs *regs);
 
 /* Get the kprobe at this addr (if any). Must have called lock_kprobes */
 struct kprobe *get_kprobe(void *addr);
+struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk);
 
 int register_kprobe(struct kprobe *p);
 void unregister_kprobe(struct kprobe *p);
@@ -113,7 +180,16 @@ int register_jprobe(struct jprobe *p);
 void unregister_jprobe(struct jprobe *p);
 void jprobe_return(void);
 
-#else
+int register_kretprobe(struct kretprobe *rp);
+void unregister_kretprobe(struct kretprobe *rp);
+
+struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp);
+struct kretprobe_instance *get_rp_inst(void *sara);
+struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk);
+void add_rp_inst(struct kretprobe_instance *ri);
+void kprobe_flush_task(struct task_struct *tk);
+void recycle_rp_inst(struct kretprobe_instance *ri);
+#else /* CONFIG_KPROBES */
 static inline int kprobe_running(void)
 {
 	return 0;
@@ -135,5 +211,15 @@ static inline void unregister_jprobe(struct jprobe *p)
 static inline void jprobe_return(void)
 {
 }
-#endif
+static inline int register_kretprobe(struct kretprobe *rp)
+{
+	return -ENOSYS;
+}
+static inline void unregister_kretprobe(struct kretprobe *rp)
+{
+}
+static inline void kprobe_flush_task(struct task_struct *tk)
+{
+}
+#endif /* CONFIG_KPROBES */
 #endif /* _LINUX_KPROBES_H */
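Taken together, the declarations above are the whole client-visible API: fill in a struct kretprobe, point kp.addr at the probed function, and call register_kretprobe(). As a sketch only, a 2.6-era i386 module might use it as follows; the module parameter used to pass in the probe address, the handler name, and the maxactive value are illustrative assumptions, not part of this patch:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Address of the function to probe, passed in at insmod time. */
static unsigned long probe_addr;
module_param(probe_addr, ulong, 0);

/* Runs each time the probed function returns; on i386 the return
 * value is in regs->eax. */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	printk(KERN_INFO "probed function returned %ld\n", regs->eax);
	return 0;
}

static struct kretprobe my_rp = {
	.handler   = ret_handler,
	.maxactive = 20,	/* allow up to 20 concurrent activations */
};

static int __init rp_example_init(void)
{
	if (!probe_addr)
		return -EINVAL;
	my_rp.kp.addr = (kprobe_opcode_t *) probe_addr;
	return register_kretprobe(&my_rp);
}

static void __exit rp_example_exit(void)
{
	unregister_kretprobe(&my_rp);
	printk(KERN_INFO "missed %d probed returns\n", my_rp.nmissed);
}

module_init(rp_example_init);
module_exit(rp_example_exit);
MODULE_LICENSE("GPL");

If register_kretprobe() returns -ENOSYS, the running architecture does not define ARCH_SUPPORTS_KRETPROBES (only i386 does in this patch).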
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 037142b72a49..692fbf75ab49 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -27,6 +27,9 @@
  * interface to access function arguments.
  * 2004-Sep Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
  * exceptions notifier to be first on the priority list.
+ * 2005-May Hien Nguyen <hien@us.ibm.com>, Jim Keniston
+ * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+ * <prasanna@in.ibm.com> added function-return probes.
  */
 #include <linux/kprobes.h>
 #include <linux/spinlock.h>
@@ -41,6 +44,7 @@
 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
 
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
+static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
 unsigned int kprobe_cpu = NR_CPUS;
 static DEFINE_SPINLOCK(kprobe_lock);
@@ -78,7 +82,7 @@ struct kprobe *get_kprobe(void *addr)
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
  */
-int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
+static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe *kp;
 
@@ -92,8 +96,8 @@ int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 	return 0;
 }
 
-void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
+static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 			unsigned long flags)
 {
 	struct kprobe *kp;
 
@@ -107,7 +111,8 @@ void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 	return;
 }
 
-int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
+static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
+			      int trapnr)
 {
 	/*
 	 * if we faulted "during" the execution of a user specified
@@ -120,6 +125,135 @@ int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
 	return 0;
 }
 
+struct kprobe trampoline_p = {
+		.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+		.pre_handler = trampoline_probe_handler,
+		.post_handler = trampoline_post_handler
+};
+
+struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
+{
+	struct hlist_node *node;
+	struct kretprobe_instance *ri;
+	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
+		return ri;
+	return NULL;
+}
+
+static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
+{
+	struct hlist_node *node;
+	struct kretprobe_instance *ri;
+	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
+		return ri;
+	return NULL;
+}
+
+struct kretprobe_instance *get_rp_inst(void *sara)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct task_struct *tsk;
+	struct kretprobe_instance *ri;
+
+	tsk = arch_get_kprobe_task(sara);
+	head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
+	hlist_for_each_entry(ri, node, head, hlist) {
+		if (ri->stack_addr == sara)
+			return ri;
+	}
+	return NULL;
+}
+
+void add_rp_inst(struct kretprobe_instance *ri)
+{
+	struct task_struct *tsk;
+	/*
+	 * Remove rp inst off the free list -
+	 * Add it back when probed function returns
+	 */
+	hlist_del(&ri->uflist);
+	tsk = arch_get_kprobe_task(ri->stack_addr);
+	/* Add rp inst onto table */
+	INIT_HLIST_NODE(&ri->hlist);
+	hlist_add_head(&ri->hlist,
+			&kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]);
+
+	/* Also add this rp inst to the used list. */
+	INIT_HLIST_NODE(&ri->uflist);
+	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
+}
+
+void recycle_rp_inst(struct kretprobe_instance *ri)
+{
+	/* remove rp inst off the rprobe_inst_table */
+	hlist_del(&ri->hlist);
+	if (ri->rp) {
+		/* remove rp inst off the used list */
+		hlist_del(&ri->uflist);
+		/* put rp inst back onto the free list */
+		INIT_HLIST_NODE(&ri->uflist);
+		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
+	} else
+		/* Unregistering */
+		kfree(ri);
+}
+
+struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk)
+{
+	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
+}
+
+struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk)
+{
+	struct task_struct *tsk;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kretprobe_instance *ri;
+
+	head = &kretprobe_inst_table[hash_ptr(tk, KPROBE_HASH_BITS)];
+
+	hlist_for_each_entry(ri, node, head, hlist) {
+		tsk = arch_get_kprobe_task(ri->stack_addr);
+		if (tsk == tk)
+			return ri;
+	}
+	return NULL;
+}
+
+/*
+ * This function is called from do_exit() or do_execve() when task tk's
+ * stack is about to be recycled. Recycle any function-return probe
+ * instances associated with this task. These represent probed functions
+ * that have been called but may never return.
+ */
+void kprobe_flush_task(struct task_struct *tk)
+{
+	arch_kprobe_flush_task(tk, &kprobe_lock);
+}
+
+/*
+ * This kprobe pre_handler is registered with every kretprobe. When the
+ * probe hits, it sets up the return probe.
+ */
+static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
+
+	/* TODO: consider swapping the RA only after the last pre_handler fired */
+	arch_prepare_kretprobe(rp, regs);
+	return 0;
+}
+
+static inline void free_rp_inst(struct kretprobe *rp)
+{
+	struct kretprobe_instance *ri;
+	while ((ri = get_free_rp_inst(rp)) != NULL) {
+		hlist_del(&ri->uflist);
+		kfree(ri);
+	}
+}
+
 /*
  * Fill in the required fields of the "manager kprobe". Replace the
  * earlier kprobe in the hlist with the manager kprobe
@@ -257,16 +391,82 @@ void unregister_jprobe(struct jprobe *jp)
 	unregister_kprobe(&jp->kp);
 }
 
+#ifdef ARCH_SUPPORTS_KRETPROBES
+
+int register_kretprobe(struct kretprobe *rp)
+{
+	int ret = 0;
+	struct kretprobe_instance *inst;
+	int i;
+
+	rp->kp.pre_handler = pre_handler_kretprobe;
+
+	/* Pre-allocate memory for max kretprobe instances */
+	if (rp->maxactive <= 0) {
+#ifdef CONFIG_PREEMPT
+		rp->maxactive = max(10, 2 * NR_CPUS);
+#else
+		rp->maxactive = NR_CPUS;
+#endif
+	}
+	INIT_HLIST_HEAD(&rp->used_instances);
+	INIT_HLIST_HEAD(&rp->free_instances);
+	for (i = 0; i < rp->maxactive; i++) {
+		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
+		if (inst == NULL) {
+			free_rp_inst(rp);
+			return -ENOMEM;
+		}
+		INIT_HLIST_NODE(&inst->uflist);
+		hlist_add_head(&inst->uflist, &rp->free_instances);
+	}
+
+	rp->nmissed = 0;
+	/* Establish function entry probe point */
+	if ((ret = register_kprobe(&rp->kp)) != 0)
+		free_rp_inst(rp);
+	return ret;
+}
+
+#else /* ARCH_SUPPORTS_KRETPROBES */
+
+int register_kretprobe(struct kretprobe *rp)
+{
+	return -ENOSYS;
+}
+
+#endif /* ARCH_SUPPORTS_KRETPROBES */
+
+void unregister_kretprobe(struct kretprobe *rp)
+{
+	unsigned long flags;
+	struct kretprobe_instance *ri;
+
+	unregister_kprobe(&rp->kp);
+	/* No race here */
+	spin_lock_irqsave(&kprobe_lock, flags);
+	free_rp_inst(rp);
+	while ((ri = get_used_rp_inst(rp)) != NULL) {
+		ri->rp = NULL;
+		hlist_del(&ri->uflist);
+	}
+	spin_unlock_irqrestore(&kprobe_lock, flags);
+}
+
 static int __init init_kprobes(void)
 {
 	int i, err = 0;
 
 	/* FIXME allocate the probe table, currently defined statically */
 	/* initialize all list heads */
-	for (i = 0; i < KPROBE_TABLE_SIZE; i++)
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		INIT_HLIST_HEAD(&kprobe_table[i]);
+		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
+	}
 
 	err = register_die_notifier(&kprobe_exceptions_nb);
+	/* Register the trampoline probe for return probe */
+	register_kprobe(&trampoline_p);
 	return err;
 }
 
@@ -277,3 +477,6 @@ EXPORT_SYMBOL_GPL(unregister_kprobe);
 EXPORT_SYMBOL_GPL(register_jprobe);
 EXPORT_SYMBOL_GPL(unregister_jprobe);
 EXPORT_SYMBOL_GPL(jprobe_return);
+EXPORT_SYMBOL_GPL(register_kretprobe);
+EXPORT_SYMBOL_GPL(unregister_kretprobe);
+
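One practical consequence of register_kretprobe() above: when maxactive is left at zero it defaults to NR_CPUS, or max(10, 2 * NR_CPUS) under CONFIG_PREEMPT, which suits short, non-sleeping probed functions. If the probed function can block, more tasks than CPUs may be between entry and return at once, so it is worth setting maxactive explicitly and checking nmissed afterwards. A hedged sketch under that assumption (the handler name and headroom value are illustrative, not taken from this patch):

#include <linux/kprobes.h>
#include <linux/threads.h>	/* NR_CPUS */

static int sleepy_ret_handler(struct kretprobe_instance *ri,
			      struct pt_regs *regs)
{
	return 0;
}

static struct kretprobe sleepy_rp = {
	.handler   = sleepy_ret_handler,
	/* Headroom over the default: the probed function may sleep, so
	 * many tasks can be inside it simultaneously. If sleepy_rp.nmissed
	 * is nonzero after a run, raise this further. */
	.maxactive = 2 * NR_CPUS + 16,
};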