author	Ananth N Mavinakayanahalli <amavin@redhat.com>	2005-05-05 19:15:42 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-05-05 19:36:39 -0400
commit	64f562c6df3cfc5d1b2b4bdbcb7951457df9c237
tree	5fecfd97cfa1965185ec2254668d90d8f03e3f5f
parent	04dea5f93231204cc3ca0ab793ce76dbb10c86ba
[PATCH] kprobes: Allow multiple kprobes at the same address
Allow registration of multiple kprobes at the same address in an
architecture-agnostic way. The corresponding handlers are invoked in
sequence. A kprobe and a jprobe cannot (yet) co-exist at the same
address.

Signed-off-by: Ananth N Mavinakayanahalli <amavin@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
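For context, a minimal usage sketch (not part of this patch) showing what
the change enables from a module's point of view. The probe target
my_traced_func and the module itself are hypothetical; the types and calls
(struct kprobe, register_kprobe(), unregister_kprobe()) are the ones this
patch touches.

	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/kprobes.h>

	extern void my_traced_func(void);	/* hypothetical probe target */

	static int pre_a(struct kprobe *p, struct pt_regs *regs)
	{
		printk("probe A hit at %p\n", p->addr);
		return 0;	/* let the probed instruction run normally */
	}

	static int pre_b(struct kprobe *p, struct pt_regs *regs)
	{
		printk("probe B hit at %p\n", p->addr);
		return 0;
	}

	static struct kprobe kp_a = { .pre_handler = pre_a };
	static struct kprobe kp_b = { .pre_handler = pre_b };

	static int __init demo_init(void)
	{
		int ret;

		/* Both probes target the same address. */
		kp_a.addr = (kprobe_opcode_t *) my_traced_func;
		kp_b.addr = (kprobe_opcode_t *) my_traced_func;

		ret = register_kprobe(&kp_a);
		if (ret)
			return ret;
		/*
		 * Before this patch the second registration failed with
		 * -EEXIST; now both pre-handlers fire at the probe point.
		 */
		return register_kprobe(&kp_b);
	}

	static void __exit demo_exit(void)
	{
		unregister_kprobe(&kp_b);
		unregister_kprobe(&kp_a);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");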
 include/linux/kprobes.h |   3
 kernel/kprobes.c        | 144
 2 files changed, 134 insertions(+), 13 deletions(-)
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index f20c163de4f5..99ddba5a4e00 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -43,6 +43,9 @@ typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
 struct kprobe {
 	struct hlist_node hlist;
 
+	/* list of kprobes for multi-handler support */
+	struct list_head list;
+
 	/* location of the probe point */
 	kprobe_opcode_t *addr;
 
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d8903e60c99a..037142b72a49 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -44,6 +44,7 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 
 unsigned int kprobe_cpu = NR_CPUS;
 static DEFINE_SPINLOCK(kprobe_lock);
+static struct kprobe *curr_kprobe;
 
 /* Locks kprobe: irqs must be disabled */
 void lock_kprobes(void)
@@ -73,22 +74,139 @@ struct kprobe *get_kprobe(void *addr)
 	return NULL;
 }
 
+/*
+ * Aggregate handlers for multiple kprobes support - these handlers
+ * take care of invoking the individual kprobe handlers on p->list
+ */
+int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe *kp;
+
+	list_for_each_entry(kp, &p->list, list) {
+		if (kp->pre_handler) {
+			curr_kprobe = kp;
+			kp->pre_handler(kp, regs);
+			curr_kprobe = NULL;
+		}
+	}
+	return 0;
+}
+
+void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
+			unsigned long flags)
+{
+	struct kprobe *kp;
+
+	list_for_each_entry(kp, &p->list, list) {
+		if (kp->post_handler) {
+			curr_kprobe = kp;
+			kp->post_handler(kp, regs, flags);
+			curr_kprobe = NULL;
+		}
+	}
+	return;
+}
+
+int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
+{
+	/*
+	 * if we faulted "during" the execution of a user specified
+	 * probe handler, invoke just that probe's fault handler
+	 */
+	if (curr_kprobe && curr_kprobe->fault_handler) {
+		if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
+			return 1;
+	}
+	return 0;
+}
+
+/*
+ * Fill in the required fields of the "manager kprobe". Replace the
+ * earlier kprobe in the hlist with the manager kprobe
+ */
+static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
+{
+	ap->addr = p->addr;
+	ap->opcode = p->opcode;
+	memcpy(&ap->ainsn, &p->ainsn, sizeof(struct arch_specific_insn));
+
+	ap->pre_handler = aggr_pre_handler;
+	ap->post_handler = aggr_post_handler;
+	ap->fault_handler = aggr_fault_handler;
+
+	INIT_LIST_HEAD(&ap->list);
+	list_add(&p->list, &ap->list);
+
+	INIT_HLIST_NODE(&ap->hlist);
+	hlist_del(&p->hlist);
+	hlist_add_head(&ap->hlist,
+		&kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
+}
+
+/*
+ * This is the second or subsequent kprobe at the address - handle
+ * the intricacies
+ * TODO: Move kcalloc outside the spinlock
+ */
+static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
+{
+	int ret = 0;
+	struct kprobe *ap;
+
+	if (old_p->break_handler || p->break_handler) {
+		ret = -EEXIST;	/* kprobe and jprobe can't (yet) coexist */
+	} else if (old_p->pre_handler == aggr_pre_handler) {
+		list_add(&p->list, &old_p->list);
+	} else {
+		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
+		if (!ap)
+			return -ENOMEM;
+		add_aggr_kprobe(ap, old_p);
+		list_add(&p->list, &ap->list);
+	}
+	return ret;
+}
+
+/* kprobe removal house-keeping routines */
+static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
+{
+	*p->addr = p->opcode;
+	hlist_del(&p->hlist);
+	flush_icache_range((unsigned long) p->addr,
+		   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+	spin_unlock_irqrestore(&kprobe_lock, flags);
+	arch_remove_kprobe(p);
+}
+
+static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
+		struct kprobe *p, unsigned long flags)
+{
+	list_del(&p->list);
+	if (list_empty(&old_p->list)) {
+		cleanup_kprobe(old_p, flags);
+		kfree(old_p);
+	} else
+		spin_unlock_irqrestore(&kprobe_lock, flags);
+}
+
 int register_kprobe(struct kprobe *p)
 {
 	int ret = 0;
 	unsigned long flags = 0;
+	struct kprobe *old_p;
 
 	if ((ret = arch_prepare_kprobe(p)) != 0) {
 		goto rm_kprobe;
 	}
 	spin_lock_irqsave(&kprobe_lock, flags);
-	INIT_HLIST_NODE(&p->hlist);
-	if (get_kprobe(p->addr)) {
-		ret = -EEXIST;
+	old_p = get_kprobe(p->addr);
+	if (old_p) {
+		ret = register_aggr_kprobe(old_p, p);
 		goto out;
 	}
-	arch_copy_kprobe(p);
 
+	arch_copy_kprobe(p);
+	INIT_HLIST_NODE(&p->hlist);
 	hlist_add_head(&p->hlist,
 		&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
@@ -107,17 +225,17 @@ rm_kprobe:
 void unregister_kprobe(struct kprobe *p)
 {
 	unsigned long flags;
+	struct kprobe *old_p;
+
 	spin_lock_irqsave(&kprobe_lock, flags);
-	if (!get_kprobe(p->addr)) {
+	old_p = get_kprobe(p->addr);
+	if (old_p) {
+		if (old_p->pre_handler == aggr_pre_handler)
+			cleanup_aggr_kprobe(old_p, p, flags);
+		else
+			cleanup_kprobe(p, flags);
+	} else
 		spin_unlock_irqrestore(&kprobe_lock, flags);
-		return;
-	}
-	*p->addr = p->opcode;
-	hlist_del(&p->hlist);
-	flush_icache_range((unsigned long) p->addr,
-			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
-	spin_unlock_irqrestore(&kprobe_lock, flags);
-	arch_remove_kprobe(p);
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
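
To make the aggregation mechanism easier to follow in isolation, here is a
minimal userspace C sketch of the same pattern, assuming nothing from the
kernel: a single "manager" slot owns the probe point and dispatches to every
handler chained behind it, mirroring what aggr_pre_handler() does with
p->list. All names below are illustrative.

	#include <stdio.h>

	struct probe {
		void (*handler)(struct probe *);
		struct probe *next;	/* stand-in for the kernel's list_head */
	};

	/* The manager's handler: invoke each chained probe in sequence. */
	static void aggr_handler(struct probe *mgr)
	{
		for (struct probe *p = mgr->next; p; p = p->next)
			p->handler(p);
	}

	static void handler_a(struct probe *p) { puts("A fired"); }
	static void handler_b(struct probe *p) { puts("B fired"); }

	int main(void)
	{
		struct probe b   = { handler_b, NULL };
		struct probe a   = { handler_a, &b };
		struct probe mgr = { aggr_handler, &a };	/* installed at the probe point */

		mgr.handler(&mgr);	/* one trap -> both A and B run */
		return 0;
	}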