Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--  kernel/kprobes.c  170
1 file changed, 120 insertions(+), 50 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 334f37472c56..90c0e82b650c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -36,6 +36,7 @@
 #include <linux/hash.h>
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/moduleloader.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
 #include <asm/kdebug.h>
@@ -50,6 +51,106 @@ unsigned int kprobe_cpu = NR_CPUS;
 static DEFINE_SPINLOCK(kprobe_lock);
 static struct kprobe *curr_kprobe;
 
+/*
+ * kprobe->ainsn.insn points to the copy of the instruction to be
+ * single-stepped. x86_64, POWER4 and above have no-exec support and
+ * stepping on the instruction on a vmalloced/kmalloced/data page
+ * is a recipe for disaster
+ */
+#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
+
+struct kprobe_insn_page {
+	struct hlist_node hlist;
+	kprobe_opcode_t *insns;		/* Page of instruction slots */
+	char slot_used[INSNS_PER_PAGE];
+	int nused;
+};
+
+static struct hlist_head kprobe_insn_pages;
+
+/**
+ * get_insn_slot() - Find a slot on an executable page for an instruction.
+ * We allocate an executable page if there's no room on existing ones.
+ */
+kprobe_opcode_t *get_insn_slot(void)
+{
+	struct kprobe_insn_page *kip;
+	struct hlist_node *pos;
+
+	hlist_for_each(pos, &kprobe_insn_pages) {
+		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
+		if (kip->nused < INSNS_PER_PAGE) {
+			int i;
+			for (i = 0; i < INSNS_PER_PAGE; i++) {
+				if (!kip->slot_used[i]) {
+					kip->slot_used[i] = 1;
+					kip->nused++;
+					return kip->insns + (i * MAX_INSN_SIZE);
+				}
+			}
+			/* Surprise!  No unused slots.  Fix kip->nused. */
+			kip->nused = INSNS_PER_PAGE;
+		}
+	}
+
+	/* All out of space.  Need to allocate a new page.  Use slot 0. */
+	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
+	if (!kip) {
+		return NULL;
+	}
+
+	/*
+	 * Use module_alloc so this page is within +/- 2GB of where the
+	 * kernel image and loaded module images reside. This is required
+	 * so x86_64 can correctly handle the %rip-relative fixups.
+	 */
+	kip->insns = module_alloc(PAGE_SIZE);
+	if (!kip->insns) {
+		kfree(kip);
+		return NULL;
+	}
+	INIT_HLIST_NODE(&kip->hlist);
+	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
+	memset(kip->slot_used, 0, INSNS_PER_PAGE);
+	kip->slot_used[0] = 1;
+	kip->nused = 1;
+	return kip->insns;
+}
+
+void free_insn_slot(kprobe_opcode_t *slot)
+{
+	struct kprobe_insn_page *kip;
+	struct hlist_node *pos;
+
+	hlist_for_each(pos, &kprobe_insn_pages) {
+		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
+		if (kip->insns <= slot &&
+		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
+			int i = (slot - kip->insns) / MAX_INSN_SIZE;
+			kip->slot_used[i] = 0;
+			kip->nused--;
+			if (kip->nused == 0) {
+				/*
+				 * Page is no longer in use.  Free it unless
+				 * it's the last one.  We keep the last one
+				 * so as not to have to set it up again the
+				 * next time somebody inserts a probe.
+				 */
+				hlist_del(&kip->hlist);
+				if (hlist_empty(&kprobe_insn_pages)) {
+					INIT_HLIST_NODE(&kip->hlist);
+					hlist_add_head(&kip->hlist,
+						       &kprobe_insn_pages);
+				} else {
+					module_free(NULL, kip->insns);
+					kfree(kip);
+				}
+			}
+			return;
+		}
+	}
+}
+
 /* Locks kprobe: irqs must be disabled */
 void lock_kprobes(void)
 {
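For orientation, the allocator above is meant to be driven by the per-architecture probe setup path. A minimal sketch of a consumer, using hypothetical helper names (the real logic, including the %rip-relative fixups the comment mentions, lives in each arch's kprobes.c):

	/*
	 * Hypothetical sketch, not the actual arch implementation:
	 * reserve an executable slot, copy the probed instruction into
	 * it for out-of-line single-stepping, release it on teardown.
	 */
	static int example_arch_prepare_kprobe(struct kprobe *p)
	{
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			return -ENOMEM;
		memcpy(p->ainsn.insn, p->addr,
		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		return 0;
	}

	static void example_arch_remove_kprobe(struct kprobe *p)
	{
		if (p->ainsn.insn)
			free_insn_slot(p->ainsn.insn);
	}

For scale: on x86_64, with MAX_INSN_SIZE of 15 and a one-byte kprobe_opcode_t, INSNS_PER_PAGE works out to 4096/15 = 273 slots per page.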
@@ -139,12 +240,6 @@ static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 	return 0;
 }
 
-struct kprobe trampoline_p = {
-	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
-	.pre_handler = trampoline_probe_handler,
-	.post_handler = trampoline_post_handler
-};
-
 struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
 {
 	struct hlist_node *node;
@@ -163,35 +258,18 @@ static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
 	return NULL;
 }
 
-struct kretprobe_instance *get_rp_inst(void *sara)
-{
-	struct hlist_head *head;
-	struct hlist_node *node;
-	struct task_struct *tsk;
-	struct kretprobe_instance *ri;
-
-	tsk = arch_get_kprobe_task(sara);
-	head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
-	hlist_for_each_entry(ri, node, head, hlist) {
-		if (ri->stack_addr == sara)
-			return ri;
-	}
-	return NULL;
-}
-
 void add_rp_inst(struct kretprobe_instance *ri)
 {
-	struct task_struct *tsk;
 	/*
 	 * Remove rp inst off the free list -
 	 * Add it back when probed function returns
 	 */
 	hlist_del(&ri->uflist);
-	tsk = arch_get_kprobe_task(ri->stack_addr);
+
 	/* Add rp inst onto table */
 	INIT_HLIST_NODE(&ri->hlist);
 	hlist_add_head(&ri->hlist,
-			&kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]);
+			&kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);
 
 	/* Also add this rp inst to the used list. */
 	INIT_HLIST_NODE(&ri->uflist);
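The hash key for the instance table is now the task pointer stored in the instance itself (ri->task) rather than one derived from the saved stack address. That assumes the arch side records the owning task when it arms a return probe; a rough sketch of such a setup routine, with an illustrative name (following the shape of the x86_64 code, not quoted from it):

	/*
	 * Hypothetical excerpt of arch return-probe setup: grab a free
	 * instance, note which task owns it, then file it in the table.
	 */
	void example_arch_prepare_kretprobe(struct kretprobe *rp,
					    struct pt_regs *regs)
	{
		struct kretprobe_instance *ri;

		if ((ri = get_free_rp_inst(rp)) != NULL) {
			ri->rp = rp;
			ri->task = current;	/* key used by add_rp_inst() */
			/* ... save and redirect the return address ... */
			add_rp_inst(ri);
		} else {
			rp->nmissed++;
		}
	}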
@@ -218,34 +296,25 @@ struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk)
 	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
 }
 
-struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk)
-{
-	struct task_struct *tsk;
-	struct hlist_head *head;
-	struct hlist_node *node;
-	struct kretprobe_instance *ri;
-
-	head = &kretprobe_inst_table[hash_ptr(tk, KPROBE_HASH_BITS)];
-
-	hlist_for_each_entry(ri, node, head, hlist) {
-		tsk = arch_get_kprobe_task(ri->stack_addr);
-		if (tsk == tk)
-			return ri;
-	}
-	return NULL;
-}
-
 /*
- * This function is called from do_exit or do_execv when task tk's stack is
- * about to be recycled. Recycle any function-return probe instances
- * associated with this task. These represent probed functions that have
- * been called but may never return.
+ * This function is called from exit_thread or flush_thread when task tk's
+ * stack is being recycled so that we can recycle any function-return probe
+ * instances associated with this task. These left over instances represent
+ * probed functions that have been called but will never return.
  */
 void kprobe_flush_task(struct task_struct *tk)
 {
+	struct kretprobe_instance *ri;
+	struct hlist_head *head;
+	struct hlist_node *node, *tmp;
 	unsigned long flags = 0;
+
 	spin_lock_irqsave(&kprobe_lock, flags);
-	arch_kprobe_flush_task(tk);
+	head = kretprobe_inst_table_head(current);
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task == tk)
+			recycle_rp_inst(ri);
+	}
 	spin_unlock_irqrestore(&kprobe_lock, flags);
 }
 
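kprobe_flush_task() is now fully generic: it walks a single hash bucket with the list-safe iterator and recycles every instance owned by the exiting task, so the old arch_kprobe_flush_task() hook disappears. A minimal sketch of the expected call site, assuming an arch exit_thread() of roughly this shape:

	/*
	 * Hypothetical caller: arch thread teardown hands the dying task
	 * to kprobes so that return-probe instances for functions that
	 * will never return can be reclaimed.
	 */
	void exit_thread(void)
	{
		struct task_struct *me = current;

		kprobe_flush_task(me);
		/* ... remainder of arch-specific teardown ... */
	}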
@@ -505,9 +574,10 @@ static int __init init_kprobes(void)
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
 	}
 
-	err = register_die_notifier(&kprobe_exceptions_nb);
-	/* Register the trampoline probe for return probe */
-	register_kprobe(&trampoline_p);
+	err = arch_init();
+	if (!err)
+		err = register_die_notifier(&kprobe_exceptions_nb);
+
 	return err;
 }
 
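The trampoline registration dropped from init_kprobes() moves behind the new arch_init() hook, so only architectures that implement return probes pay for it. A plausible arch-side definition, reusing the symbols from the trampoline_p object deleted above (a sketch, not the quoted arch code):

	/*
	 * Sketch of a per-arch hook: register the kretprobe trampoline
	 * probe that generic code previously set up unconditionally.
	 */
	static struct kprobe trampoline_p = {
		.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
		.pre_handler = trampoline_probe_handler,
		.post_handler = trampoline_post_handler
	};

	int __init arch_init(void)
	{
		return register_kprobe(&trampoline_p);
	}

An architecture without return-probe support can simply return 0 from its arch_init().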