Diffstat (limited to 'arch/avr32/kernel/kprobes.c')
-rw-r--r--  arch/avr32/kernel/kprobes.c  270
1 files changed, 270 insertions, 0 deletions
diff --git a/arch/avr32/kernel/kprobes.c b/arch/avr32/kernel/kprobes.c
new file mode 100644
index 000000000000..6caf9e8d8080
--- /dev/null
+++ b/arch/avr32/kernel/kprobes.c
@@ -0,0 +1,270 @@
/*
 * Kernel Probes (KProbes)
 *
 * Copyright (C) 2005-2006 Atmel Corporation
 *
 * Based on arch/ppc64/kernel/kprobes.c
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kprobes.h>
#include <linux/ptrace.h>

#include <asm/cacheflush.h>
#include <asm/kdebug.h>
#include <asm/ocd.h>

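/*
 * Per-CPU pointer to the kprobe currently being handled, plus the
 * handler state (KPROBE_HIT_ACTIVE/KPROBE_HIT_SS/KPROBE_HIT_SSDONE)
 * and the register snapshot saved by setjmp_pre_handler() for
 * longjmp_break_handler() to restore.
 */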
DEFINE_PER_CPU(struct kprobe *, current_kprobe);
static unsigned long kprobe_status;
static struct pt_regs jprobe_saved_regs;

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;

	if ((unsigned long)p->addr & 0x01) {
		printk(KERN_WARNING "Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	}

	/* XXX: Might be a good idea to check if p->addr is a valid
	 * kernel address as well... */

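	/*
	 * Save a copy of the original instruction: the out-of-line
	 * buffer keeps the full instruction and p->opcode keeps the
	 * first word so it can be written back when the probe is
	 * disarmed or single-stepped.
	 */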
	if (!ret) {
		pr_debug("copy kprobe at %p\n", p->addr);
		memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
	}

	return ret;
}

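/*
 * Arming a probe replaces the first word of the probed instruction
 * with the breakpoint instruction; disarming writes the saved opcode
 * back.  The icache is flushed in both cases so the change is visible
 * to instruction fetch.
 */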
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	pr_debug("arming kprobe at %p\n", p->addr);
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	pr_debug("disarming kprobe at %p\n", p->addr);
	*p->addr = p->opcode;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long dc;

	pr_debug("preparing to singlestep over %p (PC=%08lx)\n",
		 p->addr, regs->pc);

	BUG_ON(!(sysreg_read(SR) & SYSREG_BIT(SR_D)));

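	/*
	 * Set the single-step bit in the OCD Development Control
	 * register so that a single-step exception (DIE_SSTEP) is
	 * raised right after the original instruction has executed.
	 */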
	dc = __mfdr(DBGREG_DC);
	dc |= DC_SS;
	__mtdr(DBGREG_DC, dc);

	/*
	 * We must run the instruction from its original location
	 * since it may actually reference PC.
	 *
	 * TODO: Do the instruction replacement directly in icache.
	 */
	*p->addr = p->opcode;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long dc;

	pr_debug("resuming execution at PC=%08lx\n", regs->pc);

	dc = __mfdr(DBGREG_DC);
	dc &= ~DC_SS;
	__mtdr(DBGREG_DC, dc);

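	/* Re-arm the probe now that the original instruction has run. */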
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__get_cpu_var(current_kprobe) = p;
}

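/*
 * Breakpoint exception handler: called from kprobe_exceptions_notify()
 * on DIE_BREAKPOINT.  Looks up the kprobe registered at the faulting
 * PC, runs its pre_handler and then sets up single-stepping over the
 * original instruction.
 */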
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	void *addr = (void *)regs->pc;
	int ret = 0;

	pr_debug("kprobe_handler: kprobe_running=%d\n",
		 kprobe_running());

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();

	/* Check that we're not recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			if (kprobe_status == KPROBE_HIT_SS) {
				printk(KERN_WARNING "FIXME: kprobe hit while single-stepping!\n");
				goto no_kprobe;
			}

			printk(KERN_WARNING "FIXME: kprobe hit while handling another kprobe\n");
			goto no_kprobe;
		} else {
			p = kprobe_running();
			if (p->break_handler && p->break_handler(p, regs))
				goto ss_probe;
		}
		/* If it's not ours, it can't be a delete race (we hold the lock). */
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p)
		goto no_kprobe;

	kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	prepare_singlestep(p, regs);
	kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	/* Balance the preempt_disable() above before bailing out. */
	preempt_enable_no_resched();
	return ret;
}

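/*
 * Single-step exception handler: called from kprobe_exceptions_notify()
 * on DIE_SSTEP once the original instruction has been executed.  Runs
 * the post_handler, puts the breakpoint back via resume_execution()
 * and drops the preemption count taken in kprobe_handler().
 */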
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	pr_debug("post_kprobe_handler, cur=%p\n", cur);

	if (!cur)
		return 0;

	if (cur->post_handler) {
		kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);
	reset_current_kprobe();
	preempt_enable_no_resched();

	return 1;
}

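/*
 * Called on DIE_FAULT while a kprobe is active: give the probe's
 * fault_handler a chance to deal with it, otherwise abort the
 * single-step and let the normal fault handling take over.
 */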
static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();

	pr_debug("kprobe_fault_handler: trapnr=%d\n", trapnr);

	if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
		return 1;

	if (kprobe_status & KPROBE_HIT_SS) {
		resume_execution(cur, regs);
		preempt_enable_no_resched();
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	pr_debug("kprobe_exceptions_notify: val=%lu, data=%p\n",
		 val, data);

	switch (val) {
	case DIE_BREAKPOINT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_FAULT:
		if (kprobe_running()
		    && kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}

	return ret;
}

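/*
 * Jprobe support: setjmp_pre_handler() saves the register state and
 * redirects the PC to the jprobe entry point, which therefore runs
 * with the same arguments as the probed function.  The entry point
 * finishes with jprobe_return(), whose breakpoint brings us back into
 * longjmp_break_handler(), which restores the saved registers.
 */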
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);

	memcpy(&jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/*
	 * TODO: We should probably save some of the stack here as
	 * well, since gcc may pass arguments on the stack for certain
	 * functions (lots of arguments, large aggregates, varargs)
	 */

	/* setup return addr to the jprobe handler routine */
	regs->pc = (unsigned long)jp->entry;
	return 1;
}

void __kprobes jprobe_return(void)
{
	asm volatile("breakpoint" ::: "memory");
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	/*
	 * FIXME - we should ideally be validating that we got here 'cos
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs));
	return 1;
}

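/*
 * Put the on-chip debug unit into monitor mode with debugging enabled
 * (DC_MM | DC_DBE) so that breakpoint and single-step events are
 * delivered to the kernel's own debug exception handlers rather than
 * to an external OCD debugger.
 */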
int __init arch_init_kprobes(void)
{
	printk(KERN_INFO "KPROBES: Enabling monitor mode (MM|DBE)...\n");
	__mtdr(DBGREG_DC, DC_MM | DC_DBE);

	/* TODO: Register kretprobe trampoline */
	return 0;
}