Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/Kconfig.debug    |  11
-rw-r--r--  arch/ia64/kernel/Makefile  |   1
-rw-r--r--  arch/ia64/kernel/kprobes.c | 332
3 files changed, 344 insertions, 0 deletions
diff --git a/arch/ia64/Kconfig.debug b/arch/ia64/Kconfig.debug
index de9d507ba0fd..fda67ac993d7 100644
--- a/arch/ia64/Kconfig.debug
+++ b/arch/ia64/Kconfig.debug
@@ -2,6 +2,17 @@ menu "Kernel hacking"
 
 source "lib/Kconfig.debug"
 
+config KPROBES
+        bool "Kprobes"
+        depends on DEBUG_KERNEL
+        help
+          Kprobes allows you to trap at almost any kernel address and
+          execute a callback function.  register_kprobe() establishes
+          a probepoint and specifies the callback.  Kprobes is useful
+          for kernel debugging, non-intrusive instrumentation and testing.
+          If in doubt, say "N".
+
+
 choice
         prompt "Physical memory granularity"
         default IA64_GRANULE_64MB
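
The help text above names register_kprobe() as the hook point. For reference, a minimal client module looks roughly like the sketch below. It is not part of this patch; the probe target "do_fork" and the kallsyms_lookup_name() lookup are assumptions made purely for illustration, and at this point a caller can just as well fill in kp.addr with a kernel text address directly.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/kallsyms.h>

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        printk(KERN_INFO "kprobe hit at %p\n", p->addr);
        return 0;       /* 0: let the core single-step the probed instruction */
}

static struct kprobe kp = {
        .pre_handler = my_pre_handler,
};

static int __init kprobe_example_init(void)
{
        /* hypothetical target; any kernel text address would do */
        kp.addr = (kprobe_opcode_t *) kallsyms_lookup_name("do_fork");
        if (!kp.addr)
                return -EINVAL;
        return register_kprobe(&kp);
}

static void __exit kprobe_example_exit(void)
{
        unregister_kprobe(&kp);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");

Returning 0 from the pre-handler asks the kprobes core to single-step the displaced instruction, which on ia64 is what prepare_ss() and resume_execution() below implement.
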
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 4c73d8ba2e3d..5290fa4c3371 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_SMP) += smp.o smpboot.o domain.o
 obj-$(CONFIG_PERFMON)           += perfmon_default_smpl.o
 obj-$(CONFIG_IA64_CYCLONE)      += cyclone.o
 obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
+obj-$(CONFIG_KPROBES)           += kprobes.o
 obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
 mca_recovery-y                  += mca_drv.o mca_drv_asm.o
 
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
new file mode 100644
index 000000000000..81a53f99a573
--- /dev/null
+++ b/arch/ia64/kernel/kprobes.c
@@ -0,0 +1,332 @@
+/*
+ * Kernel Probes (KProbes)
+ * arch/ia64/kernel/kprobes.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ * Copyright (C) Intel Corporation, 2005
+ *
+ * 2005-Apr     Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
+ *              <anil.s.keshavamurthy@intel.com> adapted from i386
+ */
+
+#include <linux/config.h>
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/preempt.h>
+#include <linux/moduleloader.h>
+
+#include <asm/pgtable.h>
+#include <asm/kdebug.h>
+
+/* kprobe_status settings */
+#define KPROBE_HIT_ACTIVE       0x00000001
+#define KPROBE_HIT_SS           0x00000002
+
+static struct kprobe *current_kprobe;
+static unsigned long kprobe_status;
+
+enum instruction_type {A, I, M, F, B, L, X, u};
+static enum instruction_type bundle_encoding[32][3] = {
+        { M, I, I },            /* 00 */
+        { M, I, I },            /* 01 */
+        { M, I, I },            /* 02 */
+        { M, I, I },            /* 03 */
+        { M, L, X },            /* 04 */
+        { M, L, X },            /* 05 */
+        { u, u, u },            /* 06 */
+        { u, u, u },            /* 07 */
+        { M, M, I },            /* 08 */
+        { M, M, I },            /* 09 */
+        { M, M, I },            /* 0A */
+        { M, M, I },            /* 0B */
+        { M, F, I },            /* 0C */
+        { M, F, I },            /* 0D */
+        { M, M, F },            /* 0E */
+        { M, M, F },            /* 0F */
+        { M, I, B },            /* 10 */
+        { M, I, B },            /* 11 */
+        { M, B, B },            /* 12 */
+        { M, B, B },            /* 13 */
+        { u, u, u },            /* 14 */
+        { u, u, u },            /* 15 */
+        { B, B, B },            /* 16 */
+        { B, B, B },            /* 17 */
+        { M, M, B },            /* 18 */
+        { M, M, B },            /* 19 */
+        { u, u, u },            /* 1A */
+        { u, u, u },            /* 1B */
+        { M, F, B },            /* 1C */
+        { M, F, B },            /* 1D */
+        { u, u, u },            /* 1E */
+        { u, u, u },            /* 1F */
+};
+
+int arch_prepare_kprobe(struct kprobe *p)
+{
+        unsigned long addr = (unsigned long) p->addr;
+        unsigned long bundle_addr = addr & ~0xFULL;
+        unsigned long slot = addr & 0xf;
+        bundle_t bundle;
+        unsigned long template;
+
+        /*
+         * TODO: Verify that a probe is not being inserted
+         *       in sensitive regions of code
+         * TODO: Verify that the memory holding the probe is rwx
+         * TODO: verify this is a kernel address
+         */
+        memcpy(&bundle, (unsigned long *)bundle_addr, sizeof(bundle_t));
+        template = bundle.quad0.template;
+        if (((bundle_encoding[template][1] == L) && slot > 1) || (slot > 2)) {
+                printk(KERN_WARNING "Attempting to insert unaligned kprobe at 0x%lx\n", addr);
+                return -EINVAL;
+        }
+        return 0;
+}
+
+void arch_copy_kprobe(struct kprobe *p)
+{
+        unsigned long addr = (unsigned long)p->addr;
+        unsigned long bundle_addr = addr & ~0xFULL;
+
+        memcpy(&p->ainsn.insn.bundle, (unsigned long *)bundle_addr,
+                        sizeof(bundle_t));
+        memcpy(&p->opcode.bundle, &p->ainsn.insn.bundle, sizeof(bundle_t));
+}
+
+void arch_arm_kprobe(struct kprobe *p)
+{
+        unsigned long addr = (unsigned long)p->addr;
+        unsigned long arm_addr = addr & ~0xFULL;
+        unsigned long slot = addr & 0xf;
+        unsigned long template;
+        bundle_t bundle;
+
+        memcpy(&bundle, &p->ainsn.insn.bundle, sizeof(bundle_t));
+
+        template = bundle.quad0.template;
+        if (slot == 1 && bundle_encoding[template][1] == L)
+                slot = 2;
+        switch (slot) {
+        case 0:
+                bundle.quad0.slot0 = BREAK_INST;
+                break;
+        case 1:
+                bundle.quad0.slot1_p0 = BREAK_INST;
+                bundle.quad1.slot1_p1 = (BREAK_INST >> (64-46));
+                break;
+        case 2:
+                bundle.quad1.slot2 = BREAK_INST;
+                break;
+        }
+
+        /* Flush icache for the instruction at the emulated address */
+        flush_icache_range((unsigned long)&p->ainsn.insn.bundle,
+                        (unsigned long)&p->ainsn.insn.bundle +
+                        sizeof(bundle_t));
+        /*
+         * Patch the original instruction with the probe instruction
+         * and flush the instruction cache
+         */
+        memcpy((char *) arm_addr, (char *) &bundle, sizeof(bundle_t));
+        flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+}
+
+void arch_disarm_kprobe(struct kprobe *p)
+{
+        unsigned long addr = (unsigned long)p->addr;
+        unsigned long arm_addr = addr & ~0xFULL;
+
+        /* p->opcode contains the original unaltered bundle */
+        memcpy((char *) arm_addr, (char *) &p->opcode.bundle, sizeof(bundle_t));
+        flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
+}
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+}
+
+/*
+ * We are resuming execution after a single step fault, so the pt_regs
+ * structure reflects the register state after we executed the instruction
+ * located in the kprobe (p->ainsn.insn.bundle).  We still need to adjust
+ * the ip to point back to the original bundle address, and if we see that
+ * the slot has wrapped back to zero, then we need to point to the next
+ * bundle.
+ */
+static void resume_execution(struct kprobe *p, struct pt_regs *regs)
+{
+        unsigned long bundle = (unsigned long)p->addr & ~0xFULL;
+
+        /*
+         * TODO: Handle cases where kprobe was inserted on a branch instruction
+         */
+
+        if (!ia64_psr(regs)->ri)
+                regs->cr_iip = bundle + 0x10;
+        else
+                regs->cr_iip = bundle;
+
+        ia64_psr(regs)->ss = 0;
+}
+
+static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
+{
+        unsigned long bundle_addr = (unsigned long) &p->ainsn.insn.bundle;
+        unsigned long slot = (unsigned long)p->addr & 0xf;
+
+        /* Update instruction pointer (IIP) and slot number (IPSR.ri) */
+        regs->cr_iip = bundle_addr & ~0xFULL;
+
+        if (slot > 2)
+                slot = 0;
+
+        ia64_psr(regs)->ri = slot;
+
+        /* turn on single stepping */
+        ia64_psr(regs)->ss = 1;
+}
+
+static int pre_kprobes_handler(struct pt_regs *regs)
+{
+        struct kprobe *p;
+        int ret = 0;
+        kprobe_opcode_t *addr = (kprobe_opcode_t *)instruction_pointer(regs);
+
+        preempt_disable();
+
+        /* Handle recursion cases */
+        if (kprobe_running()) {
+                p = get_kprobe(addr);
+                if (p) {
+                        if (kprobe_status == KPROBE_HIT_SS) {
+                                unlock_kprobes();
+                                goto no_kprobe;
+                        }
+                        arch_disarm_kprobe(p);
+                        ret = 1;
+                } else {
+                        /*
+                         * jprobe instrumented function just completed
+                         */
+                        p = current_kprobe;
+                        if (p->break_handler && p->break_handler(p, regs)) {
+                                goto ss_probe;
+                        }
+                }
+        }
+
+        lock_kprobes();
+        p = get_kprobe(addr);
+        if (!p) {
+                unlock_kprobes();
+                goto no_kprobe;
+        }
+
+        kprobe_status = KPROBE_HIT_ACTIVE;
+        current_kprobe = p;
+
+        if (p->pre_handler && p->pre_handler(p, regs))
+                /*
+                 * Our pre-handler is specifically requesting that we just
+                 * do a return.  This is handling the case where the
+                 * pre-handler is really our special jprobe pre-handler.
+                 */
+                return 1;
+
+ss_probe:
+        prepare_ss(p, regs);
+        kprobe_status = KPROBE_HIT_SS;
+        return 1;
+
+no_kprobe:
+        preempt_enable_no_resched();
+        return ret;
+}
+
+static int post_kprobes_handler(struct pt_regs *regs)
+{
+        if (!kprobe_running())
+                return 0;
+
+        if (current_kprobe->post_handler)
+                current_kprobe->post_handler(current_kprobe, regs, 0);
+
+        resume_execution(current_kprobe, regs);
+
+        unlock_kprobes();
+        preempt_enable_no_resched();
+        return 1;
+}
+
+static int kprobes_fault_handler(struct pt_regs *regs, int trapnr)
+{
+        if (!kprobe_running())
+                return 0;
+
+        if (current_kprobe->fault_handler &&
+            current_kprobe->fault_handler(current_kprobe, regs, trapnr))
+                return 1;
+
+        if (kprobe_status & KPROBE_HIT_SS) {
+                resume_execution(current_kprobe, regs);
+                unlock_kprobes();
+                preempt_enable_no_resched();
+        }
+
+        return 0;
+}
+
+int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val,
+                             void *data)
+{
+        struct die_args *args = (struct die_args *)data;
+        switch(val) {
+        case DIE_BREAK:
+                if (pre_kprobes_handler(args->regs))
+                        return NOTIFY_STOP;
+                break;
+        case DIE_SS:
+                if (post_kprobes_handler(args->regs))
+                        return NOTIFY_STOP;
+                break;
+        case DIE_PAGE_FAULT:
+                if (kprobes_fault_handler(args->regs, args->trapnr))
+                        return NOTIFY_STOP;
+        default:
+                break;
+        }
+        return NOTIFY_DONE;
+}
+
+int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+        printk(KERN_WARNING "Jprobes is not supported\n");
+        return 0;
+}
+
+void jprobe_return(void)
+{
+}
+
+int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+        return 0;
+}
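
The slot arithmetic is the heart of this port: p->addr carries the 16-byte bundle address in its upper bits and the slot number (0-2) in its low 4 bits, and MLX bundles are special because the long (L+X) instruction is addressed as slot 1 but occupies slots 1 and 2, so arch_prepare_kprobe() rejects a slot-2 address in those bundles while arch_arm_kprobe() remaps slot 1 to slot 2 when patching in the break. A standalone sketch of that validity check follows; it is illustrative only, the addresses are made up, and the one-column table is trimmed from the slot-1 entries of bundle_encoding[] above.

/* Illustrative sketch only -- not part of the patch above. */
#include <stdio.h>

enum instruction_type { A, I, M, F, B, L, X, u };

/* slot-1 column of bundle_encoding[], templates 0x00-0x1F */
static const enum instruction_type encoding_slot1[32] = {
        I, I, I, I, L, L, u, u, M, M, M, M, F, F, M, M,
        I, I, B, B, u, u, B, B, M, M, u, u, F, F, u, u,
};

/* mirrors the condition arch_prepare_kprobe() uses to return -EINVAL */
static int slot_is_probeable(unsigned long addr, unsigned int template)
{
        unsigned long slot = addr & 0xf;

        if ((encoding_slot1[template] == L && slot > 1) || slot > 2)
                return 0;
        return 1;
}

int main(void)
{
        /* the same (made-up) MLX bundle, addressed at slot 1 and slot 2 */
        unsigned long slot1 = 0xa000000100123451UL;
        unsigned long slot2 = 0xa000000100123452UL;

        printf("bundle 0x%lx, template MLX: slot 1 -> %d, slot 2 -> %d\n",
               slot1 & ~0xfUL,
               slot_is_probeable(slot1, 0x04),
               slot_is_probeable(slot2, 0x04));
        return 0;
}

resume_execution() relies on the same split: after the single step, psr.ri holds the slot number, and cr_iip is pointed back at the original bundle, or at the next bundle if the slot wrapped to zero.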