author     Pekka Paalanen <pq@iki.fi>            2008-05-12 15:20:59 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2008-05-24 05:25:37 -0400
commit     ff3a3e9ba5e4273a8bc10570adab4a390fb90757
tree       63fd9b1c69ba53c514b9b2eb59ee17f10d6511de /arch/x86/mm/mmio-mod.c
parent     49023168261a7f9a2fd4a1ca1adbfea922556015
x86 mmiotrace: move files into arch/x86/mm/.
Signed-off-by: Pekka Paalanen <pq@iki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/mm/mmio-mod.c')
-rw-r--r--  arch/x86/mm/mmio-mod.c  457
1 file changed, 457 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
new file mode 100644
index 000000000000..8256546d49bf
--- /dev/null
+++ b/arch/x86/mm/mmio-mod.c
@@ -0,0 +1,457 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005
 *               Jeff Muizelaar, 2006, 2007
 *               Pekka Paalanen, 2008 <pq@iki.fi>
 *
 * Derived from the read-mod example from relay-examples by Tom Zanussi.
 */
#define DEBUG 1

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <linux/version.h>
#include <linux/kallsyms.h>
#include <asm/pgtable.h>
#include <linux/mmiotrace.h>
#include <asm/e820.h> /* for ISA_START_ADDRESS */
#include <asm/atomic.h>
#include <linux/percpu.h>

#include "pf_in.h"

#define NAME "mmiotrace: "

struct trap_reason {
        unsigned long addr;
        unsigned long ip;
        enum reason_type type;
        int active_traces;
};

struct remap_trace {
        struct list_head list;
        struct kmmio_probe probe;
        unsigned long phys;
        unsigned long id;
};

/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct trap_reason, pf_reason);
static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);

#if 0 /* XXX: no way to gather this info anymore */
/* Access to this is not per-cpu. */
static DEFINE_PER_CPU(atomic_t, dropped);
#endif

static struct dentry *marker_file;

static DEFINE_MUTEX(mmiotrace_mutex);
static DEFINE_SPINLOCK(trace_lock);
static atomic_t mmiotrace_enabled;
static LIST_HEAD(trace_list); /* struct remap_trace */

/*
 * Locking in this file:
 * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections.
 * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex
 *   and trace_lock.
 * - Routines depending on is_enabled() must take trace_lock.
 * - trace_list users must hold trace_lock.
 * - is_enabled() guarantees that mmio_trace_record is allowed.
 * - pre/post callbacks assume the effect of is_enabled() being true.
 */

/* module parameters */
static unsigned long filter_offset;
static int nommiotrace;
static int ISA_trace;
static int trace_pc;

module_param(filter_offset, ulong, 0);
module_param(nommiotrace, bool, 0);
module_param(ISA_trace, bool, 0);
module_param(trace_pc, bool, 0);

MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
MODULE_PARM_DESC(ISA_trace, "Do not exclude the low ISA range.");
MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");
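
/*
 * Example (illustrative, not part of the original commit): when mmiotrace
 * is built as a module, the parameters above can be given at load time,
 * e.g. "modprobe mmiotrace filter_offset=0xfd000000 trace_pc=1", where
 * the filter_offset value is a hypothetical PCI BAR physical address.
 */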

static bool is_enabled(void)
{
        return atomic_read(&mmiotrace_enabled);
}

#if 0 /* XXX: needs rewrite */
/*
 * Write callback for the debugfs entry:
 * Read a marker and write it to the mmio trace log
 */
static ssize_t write_marker(struct file *file, const char __user *buffer,
                                                size_t count, loff_t *ppos)
{
        char *event = NULL;
        struct mm_io_header *headp;
        ssize_t len = (count > 65535) ? 65535 : count;

        event = kzalloc(sizeof(*headp) + len, GFP_KERNEL);
        if (!event)
                return -ENOMEM;

        headp = (struct mm_io_header *)event;
        headp->type = MMIO_MAGIC | (MMIO_MARKER << MMIO_OPCODE_SHIFT);
        headp->data_len = len;

        if (copy_from_user(event + sizeof(*headp), buffer, len)) {
                kfree(event);
                return -EFAULT;
        }

        spin_lock_irq(&trace_lock);
#if 0 /* XXX: convert this to use tracing */
        if (is_enabled())
                relay_write(chan, event, sizeof(*headp) + len);
        else
#endif
                len = -EINVAL;
        spin_unlock_irq(&trace_lock);
        kfree(event);
        return len;
}
#endif

static void print_pte(unsigned long address)
{
        int level;
        pte_t *pte = lookup_address(address, &level);

        if (!pte) {
                pr_err(NAME "Error in %s: no pte for page 0x%08lx\n",
                                                        __func__, address);
                return;
        }

        if (level == PG_LEVEL_2M) {
                pr_emerg(NAME "large pages (2M/4M) are not currently "
                                        "supported: 0x%08lx\n", address);
                BUG();
        }
        pr_info(NAME "pte for 0x%lx: 0x%lx 0x%lx\n", address, pte_val(*pte),
                                        pte_val(*pte) & _PAGE_PRESENT);
}

/*
 * For some reason the pre/post pairs have been called in an
 * unmatched order. Report and die.
 */
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
        const struct trap_reason *my_reason = &get_cpu_var(pf_reason);
        pr_emerg(NAME "unexpected fault for address: 0x%08lx, "
                                        "last fault for address: 0x%08lx\n",
                                        addr, my_reason->addr);
        print_pte(addr);
        print_symbol(KERN_EMERG "faulting IP is at %s\n", regs->ip);
        print_symbol(KERN_EMERG "last faulting IP was at %s\n", my_reason->ip);
#ifdef __i386__
        pr_emerg("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
                        regs->ax, regs->bx, regs->cx, regs->dx);
        pr_emerg("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
                        regs->si, regs->di, regs->bp, regs->sp);
#else
        pr_emerg("rax: %016lx rcx: %016lx rdx: %016lx\n",
                        regs->ax, regs->cx, regs->dx);
        pr_emerg("rsi: %016lx rdi: %016lx rbp: %016lx rsp: %016lx\n",
                        regs->si, regs->di, regs->bp, regs->sp);
#endif
        put_cpu_var(pf_reason);
        BUG();
}

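/*
 * kmmio pre-fault hook: invoked from the kmmio page-fault handler before
 * the faulting instruction is single-stepped. Decodes the instruction and
 * fills the per-CPU trace record; the value fetched by a REG_READ is not
 * known yet and is filled in by post() below.
 */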
static void pre(struct kmmio_probe *p, struct pt_regs *regs,
                                                unsigned long addr)
{
        struct trap_reason *my_reason = &get_cpu_var(pf_reason);
        struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
        const unsigned long instptr = instruction_pointer(regs);
        const enum reason_type type = get_ins_type(instptr);
        struct remap_trace *trace = p->user_data;

        /* it doesn't make sense to have more than one active trace per cpu */
        if (my_reason->active_traces)
                die_kmmio_nesting_error(regs, addr);
        else
                my_reason->active_traces++;

        my_reason->type = type;
        my_reason->addr = addr;
        my_reason->ip = instptr;

        my_trace->phys = addr - trace->probe.addr + trace->phys;
        my_trace->map_id = trace->id;

        /*
         * Only record the program counter when requested.
         * It may taint clean-room reverse engineering.
         */
        if (trace_pc)
                my_trace->pc = instptr;
        else
                my_trace->pc = 0;

        /*
         * XXX: the timestamp recorded will be *after* the tracing has been
         * done, not at the time we hit the instruction. SMP implications
         * on event ordering?
         */

        switch (type) {
        case REG_READ:
                my_trace->opcode = MMIO_READ;
                my_trace->width = get_ins_mem_width(instptr);
                break;
        case REG_WRITE:
                my_trace->opcode = MMIO_WRITE;
                my_trace->width = get_ins_mem_width(instptr);
                my_trace->value = get_ins_reg_val(instptr, regs);
                break;
        case IMM_WRITE:
                my_trace->opcode = MMIO_WRITE;
                my_trace->width = get_ins_mem_width(instptr);
                my_trace->value = get_ins_imm_val(instptr);
                break;
        default:
                {
                        unsigned char *ip = (unsigned char *)instptr;
                        my_trace->opcode = MMIO_UNKNOWN_OP;
                        my_trace->width = 0;
                        my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
                                                                *(ip + 2);
                }
        }
        put_cpu_var(cpu_trace);
        put_cpu_var(pf_reason);
}

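/*
 * kmmio post-fault hook: runs after the faulting instruction has been
 * single-stepped, so for a REG_READ the destination register now holds
 * the value read from MMIO; the completed record is passed to
 * mmio_trace_rw().
 */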
static void post(struct kmmio_probe *p, unsigned long condition,
                                                struct pt_regs *regs)
{
        struct trap_reason *my_reason = &get_cpu_var(pf_reason);
        struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);

        /* this should always return the active_trace count to 0 */
        my_reason->active_traces--;
        if (my_reason->active_traces) {
                pr_emerg(NAME "unexpected post handler\n");
                BUG();
        }

        switch (my_reason->type) {
        case REG_READ:
                my_trace->value = get_ins_reg_val(my_reason->ip, regs);
                break;
        default:
                break;
        }

        mmio_trace_rw(my_trace);
        put_cpu_var(cpu_trace);
        put_cpu_var(pf_reason);
}

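/*
 * Record a new MMIO mapping and arm a kmmio probe over the remapped
 * range, so that every access through it faults into pre()/post() above.
 */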
static void ioremap_trace_core(unsigned long offset, unsigned long size,
                                                        void __iomem *addr)
{
        static atomic_t next_id;
        struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
        struct mmiotrace_map map = {
                .phys = offset,
                .virt = (unsigned long)addr,
                .len = size,
                .opcode = MMIO_PROBE
        };

        if (!trace) {
                pr_err(NAME "kmalloc failed in ioremap\n");
                return;
        }

        *trace = (struct remap_trace) {
                .probe = {
                        .addr = (unsigned long)addr,
                        .len = size,
                        .pre_handler = pre,
                        .post_handler = post,
                        .user_data = trace
                },
                .phys = offset,
                .id = atomic_inc_return(&next_id)
        };
        map.map_id = trace->id;

        spin_lock_irq(&trace_lock);
        if (!is_enabled())
                goto not_enabled;

        mmio_trace_mapping(&map);
        list_add_tail(&trace->list, &trace_list);
        if (!nommiotrace)
                register_kmmio_probe(&trace->probe);

not_enabled:
        spin_unlock_irq(&trace_lock);
}

void
mmiotrace_ioremap(unsigned long offset, unsigned long size, void __iomem *addr)
{
        if (!is_enabled()) /* recheck and proper locking in *_core() */
                return;

        pr_debug(NAME "ioremap_*(0x%lx, 0x%lx) = %p\n", offset, size, addr);
        if (filter_offset && offset != filter_offset)
                return;
        ioremap_trace_core(offset, size, addr);
}

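/*
 * Tear-down counterpart of ioremap_trace_core(): look up the traced
 * mapping by its virtual address, unregister the kmmio probe and log an
 * MMIO_UNPROBE event. Freeing is deferred until after synchronize_rcu(),
 * as unregister_kmmio_probe() requires.
 */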
static void iounmap_trace_core(volatile void __iomem *addr)
{
        struct mmiotrace_map map = {
                .phys = 0,
                .virt = (unsigned long)addr,
                .len = 0,
                .opcode = MMIO_UNPROBE
        };
        struct remap_trace *trace;
        struct remap_trace *tmp;
        struct remap_trace *found_trace = NULL;

        pr_debug(NAME "Unmapping %p.\n", addr);

        spin_lock_irq(&trace_lock);
        if (!is_enabled())
                goto not_enabled;

        list_for_each_entry_safe(trace, tmp, &trace_list, list) {
                if ((unsigned long)addr == trace->probe.addr) {
                        if (!nommiotrace)
                                unregister_kmmio_probe(&trace->probe);
                        list_del(&trace->list);
                        found_trace = trace;
                        break;
                }
        }
        map.map_id = (found_trace) ? found_trace->id : -1;
        mmio_trace_mapping(&map);

not_enabled:
        spin_unlock_irq(&trace_lock);
        if (found_trace) {
                synchronize_rcu(); /* unregister_kmmio_probe() requirement */
                kfree(found_trace);
        }
}

void mmiotrace_iounmap(volatile void __iomem *addr)
{
        might_sleep();
        if (is_enabled()) /* recheck and proper locking in *_core() */
                iounmap_trace_core(addr);
}

static void clear_trace_list(void)
{
        struct remap_trace *trace;
        struct remap_trace *tmp;

        /*
         * No locking required, because the caller ensures we are in a
         * critical section via mutex, and is_enabled() is false,
         * i.e. nothing can traverse or modify this list.
         * Caller also ensures is_enabled() cannot change.
         */
        list_for_each_entry(trace, &trace_list, list) {
                pr_notice(NAME "purging non-iounmapped "
                                        "trace @0x%08lx, size 0x%lx.\n",
                                        trace->probe.addr, trace->probe.len);
                if (!nommiotrace)
                        unregister_kmmio_probe(&trace->probe);
        }
        synchronize_rcu(); /* unregister_kmmio_probe() requirement */

        list_for_each_entry_safe(trace, tmp, &trace_list, list) {
                list_del(&trace->list);
                kfree(trace);
        }
}

#if 0 /* XXX: out of order */
static struct file_operations fops_marker = {
        .owner = THIS_MODULE,
        .write = write_marker
};
#endif

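/*
 * enable_mmiotrace()/disable_mmiotrace() are serialized by
 * mmiotrace_mutex; the enabled flag itself is flipped under trace_lock
 * so that the tracing hooks above observe a consistent value.
 */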
void enable_mmiotrace(void)
{
        mutex_lock(&mmiotrace_mutex);
        if (is_enabled())
                goto out;

#if 0 /* XXX: tracing does not support text entries */
        marker_file = debugfs_create_file("marker", 0660, dir, NULL,
                                                        &fops_marker);
        if (!marker_file)
                pr_err(NAME "marker file creation failed.\n");
#endif

        if (nommiotrace)
                pr_info(NAME "MMIO tracing disabled.\n");
        if (ISA_trace)
                pr_warning(NAME "Warning! low ISA range will be traced.\n");
        spin_lock_irq(&trace_lock);
        atomic_inc(&mmiotrace_enabled);
        spin_unlock_irq(&trace_lock);
        pr_info(NAME "enabled.\n");
out:
        mutex_unlock(&mmiotrace_mutex);
}

void disable_mmiotrace(void)
{
        mutex_lock(&mmiotrace_mutex);
        if (!is_enabled())
                goto out;

        spin_lock_irq(&trace_lock);
        atomic_dec(&mmiotrace_enabled);
        BUG_ON(is_enabled());
        spin_unlock_irq(&trace_lock);

        clear_trace_list(); /* guarantees: no more kmmio callbacks */
        if (marker_file) {
                debugfs_remove(marker_file);
                marker_file = NULL;
        }

        pr_info(NAME "disabled.\n");
out:
        mutex_unlock(&mmiotrace_mutex);
}