author     Pekka Paalanen <pq@iki.fi>              2008-05-12 15:20:57 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2008-05-24 05:22:01 -0400
commit     f513638030ca384b0bace4df64f0b82f6ae1e4c6 (patch)
tree       7f766aa7339528bcce42b26fd463c5d5ef89f990 /arch/x86/kernel/mmiotrace
parent     10c43d2eb50c9a5ad60388b9d3c41c31150049e6 (diff)
x86 mmiotrace: Use percpu instead of arrays.
Signed-off-by: Pekka Paalanen <pq@iki.fi>
Cc: Eric Dumazet <dada1@cosmosbay.com>
Cc: pq@iki.fi
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
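The conversion pattern this patch applies, shown as a minimal sketch with a hypothetical foo_stats counter that is not part of the patch: DEFINE_PER_CPU() replaces an NR_CPUS-sized array, get_cpu_var() disables preemption and returns the current CPU's copy, put_cpu_var() releases it, and per_cpu(var, cpu) reads a specific CPU's copy the way subbuf_start_handler() does with 'dropped' below.

#include <linux/percpu.h>

struct foo_stats {
        unsigned long events;
};

/* One instance per possible CPU, allocated in the per-cpu area. */
static DEFINE_PER_CPU(struct foo_stats, foo_stats);

static void foo_account(void)
{
        /* get_cpu_var() disables preemption and yields this CPU's copy. */
        struct foo_stats *stats = &get_cpu_var(foo_stats);

        stats->events++;                /* CPU-local update, no locking needed */
        put_cpu_var(foo_stats);         /* re-enables preemption */
}

static unsigned long foo_read(int cpu)
{
        /* Cross-CPU read of a specific CPU's copy. */
        return per_cpu(foo_stats, cpu).events;
}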
Diffstat (limited to 'arch/x86/kernel/mmiotrace')
-rw-r--r--  arch/x86/kernel/mmiotrace/kmmio.c      27
-rw-r--r--  arch/x86/kernel/mmiotrace/mmio-mod.c   80
2 files changed, 58 insertions, 49 deletions
diff --git a/arch/x86/kernel/mmiotrace/kmmio.c b/arch/x86/kernel/mmiotrace/kmmio.c
index e759f7c3878f..5e239d0b8467 100644
--- a/arch/x86/kernel/mmiotrace/kmmio.c
+++ b/arch/x86/kernel/mmiotrace/kmmio.c
@@ -16,6 +16,7 @@
 #include <linux/uaccess.h>
 #include <linux/ptrace.h>
 #include <linux/preempt.h>
+#include <linux/percpu.h>
 #include <asm/io.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
@@ -49,7 +50,8 @@ static unsigned int handler_registered;
 static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
 static LIST_HEAD(kmmio_probes);
 
-static struct kmmio_context kmmio_ctx[NR_CPUS];
+/* Accessed per-cpu */
+static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);
 
 static struct notifier_block nb_die = {
         .notifier_call = kmmio_die_notifier
@@ -173,8 +175,7 @@ static void disarm_kmmio_fault_page(unsigned long page, int *page_level)
  */
 static int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 {
-        struct kmmio_context *ctx;
-        int cpu;
+        struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
 
         /*
          * Preemption is now disabled to prevent process switch during
@@ -187,8 +188,6 @@ static int kmmio_handler(struct pt_regs *regs, unsigned long addr)
          * And that interrupt triggers a kmmio trap?
          */
         preempt_disable();
-        cpu = smp_processor_id();
-        ctx = &kmmio_ctx[cpu];
 
         /* interrupts disabled and CPU-local data => atomicity guaranteed. */
         if (ctx->active) {
@@ -199,7 +198,7 @@ static int kmmio_handler(struct pt_regs *regs, unsigned long addr)
                  */
                 printk(KERN_EMERG "mmiotrace: recursive probe hit on CPU %d, "
                                         "for address %lu. Ignoring.\n",
-                                        cpu, addr);
+                                        smp_processor_id(), addr);
                 goto no_kmmio;
         }
         ctx->active++;
@@ -231,6 +230,7 @@ static int kmmio_handler(struct pt_regs *regs, unsigned long addr)
         /* We hold lock, now we set present bit in PTE and single step. */
         disarm_kmmio_fault_page(ctx->fpage->page, NULL);
 
+        put_cpu_var(kmmio_ctx);
         return 1;
 
 no_kmmio_locked:
@@ -238,6 +238,7 @@ no_kmmio_locked:
         ctx->active--;
 no_kmmio:
         preempt_enable_no_resched();
+        put_cpu_var(kmmio_ctx);
         /* page fault not handled by kmmio */
         return 0;
 }
@@ -249,11 +250,11 @@ no_kmmio:
  */
 static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 {
-        int cpu = smp_processor_id();
-        struct kmmio_context *ctx = &kmmio_ctx[cpu];
+        int ret = 0;
+        struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
 
         if (!ctx->active)
-                return 0;
+                goto out;
 
         if (ctx->probe && ctx->probe->post_handler)
                 ctx->probe->post_handler(ctx->probe, condition, regs);
@@ -273,10 +274,12 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
          * will have TF set, in which case, continue the remaining processing
          * of do_debug, as if this is not a probe hit.
          */
-        if (regs->flags & TF_MASK)
-                return 0;
+        if (!(regs->flags & TF_MASK))
+                ret = 1;
 
-        return 1;
+out:
+        put_cpu_var(kmmio_ctx);
+        return ret;
 }
 
 static int add_kmmio_fault_page(unsigned long page)
diff --git a/arch/x86/kernel/mmiotrace/mmio-mod.c b/arch/x86/kernel/mmiotrace/mmio-mod.c
index 0019dcdf6158..f9c609266d83 100644
--- a/arch/x86/kernel/mmiotrace/mmio-mod.c
+++ b/arch/x86/kernel/mmiotrace/mmio-mod.c
@@ -30,6 +30,7 @@
 #include <linux/mmiotrace.h>
 #include <asm/e820.h> /* for ISA_START_ADDRESS */
 #include <asm/atomic.h>
+#include <linux/percpu.h>
 
 #include "kmmio.h"
 #include "pf_in.h"
@@ -49,11 +50,11 @@ struct trap_reason {
 };
 
 /* Accessed per-cpu. */
-static struct trap_reason pf_reason[NR_CPUS];
-static struct mm_io_header_rw cpu_trace[NR_CPUS];
+static DEFINE_PER_CPU(struct trap_reason, pf_reason);
+static DEFINE_PER_CPU(struct mm_io_header_rw, cpu_trace);
 
 /* Access to this is not per-cpu. */
-static atomic_t dropped[NR_CPUS];
+static DEFINE_PER_CPU(atomic_t, dropped);
 
 static struct file_operations mmio_fops = {
         .owner = THIS_MODULE,
@@ -150,15 +151,15 @@ static void print_pte(unsigned long address)
  */
 static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
 {
-        const unsigned long cpu = smp_processor_id();
+        const struct trap_reason *my_reason = &get_cpu_var(pf_reason);
         printk(KERN_EMERG MODULE_NAME ": unexpected fault for address: %lx, "
                                         "last fault for address: %lx\n",
-                                        addr, pf_reason[cpu].addr);
+                                        addr, my_reason->addr);
         print_pte(addr);
 #ifdef __i386__
         print_symbol(KERN_EMERG "faulting EIP is at %s\n", regs->ip);
         print_symbol(KERN_EMERG "last faulting EIP was at %s\n",
-                        pf_reason[cpu].ip);
+                        my_reason->ip);
         printk(KERN_EMERG
                 "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
                 regs->ax, regs->bx, regs->cx, regs->dx);
@@ -168,100 +169,105 @@ static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
 #else
         print_symbol(KERN_EMERG "faulting RIP is at %s\n", regs->ip);
         print_symbol(KERN_EMERG "last faulting RIP was at %s\n",
-                        pf_reason[cpu].ip);
+                        my_reason->ip);
         printk(KERN_EMERG "rax: %016lx rcx: %016lx rdx: %016lx\n",
                 regs->ax, regs->cx, regs->dx);
         printk(KERN_EMERG "rsi: %016lx rdi: %016lx "
                 "rbp: %016lx rsp: %016lx\n",
                 regs->si, regs->di, regs->bp, regs->sp);
 #endif
+        put_cpu_var(pf_reason);
         BUG();
 }
 
 static void pre(struct kmmio_probe *p, struct pt_regs *regs,
                 unsigned long addr)
 {
-        const unsigned long cpu = smp_processor_id();
+        struct trap_reason *my_reason = &get_cpu_var(pf_reason);
+        struct mm_io_header_rw *my_trace = &get_cpu_var(cpu_trace);
         const unsigned long instptr = instruction_pointer(regs);
         const enum reason_type type = get_ins_type(instptr);
 
         /* it doesn't make sense to have more than one active trace per cpu */
-        if (pf_reason[cpu].active_traces)
+        if (my_reason->active_traces)
                 die_kmmio_nesting_error(regs, addr);
         else
-                pf_reason[cpu].active_traces++;
+                my_reason->active_traces++;
 
-        pf_reason[cpu].type = type;
-        pf_reason[cpu].addr = addr;
-        pf_reason[cpu].ip = instptr;
+        my_reason->type = type;
+        my_reason->addr = addr;
+        my_reason->ip = instptr;
 
-        cpu_trace[cpu].header.type = MMIO_MAGIC;
-        cpu_trace[cpu].header.pid = 0;
-        cpu_trace[cpu].header.data_len = sizeof(struct mm_io_rw);
-        cpu_trace[cpu].rw.address = addr;
+        my_trace->header.type = MMIO_MAGIC;
+        my_trace->header.pid = 0;
+        my_trace->header.data_len = sizeof(struct mm_io_rw);
+        my_trace->rw.address = addr;
 
         /*
          * Only record the program counter when requested.
          * It may taint clean-room reverse engineering.
          */
         if (trace_pc)
-                cpu_trace[cpu].rw.pc = instptr;
+                my_trace->rw.pc = instptr;
         else
-                cpu_trace[cpu].rw.pc = 0;
+                my_trace->rw.pc = 0;
 
-        record_timestamp(&cpu_trace[cpu].header);
+        record_timestamp(&my_trace->header);
 
         switch (type) {
         case REG_READ:
-                cpu_trace[cpu].header.type |=
+                my_trace->header.type |=
                         (MMIO_READ << MMIO_OPCODE_SHIFT) |
                         (get_ins_mem_width(instptr) << MMIO_WIDTH_SHIFT);
                 break;
         case REG_WRITE:
-                cpu_trace[cpu].header.type |=
+                my_trace->header.type |=
                         (MMIO_WRITE << MMIO_OPCODE_SHIFT) |
                         (get_ins_mem_width(instptr) << MMIO_WIDTH_SHIFT);
-                cpu_trace[cpu].rw.value = get_ins_reg_val(instptr, regs);
+                my_trace->rw.value = get_ins_reg_val(instptr, regs);
                 break;
         case IMM_WRITE:
-                cpu_trace[cpu].header.type |=
+                my_trace->header.type |=
                         (MMIO_WRITE << MMIO_OPCODE_SHIFT) |
                         (get_ins_mem_width(instptr) << MMIO_WIDTH_SHIFT);
-                cpu_trace[cpu].rw.value = get_ins_imm_val(instptr);
+                my_trace->rw.value = get_ins_imm_val(instptr);
                 break;
         default:
                 {
                         unsigned char *ip = (unsigned char *)instptr;
-                        cpu_trace[cpu].header.type |=
+                        my_trace->header.type |=
                                 (MMIO_UNKNOWN_OP << MMIO_OPCODE_SHIFT);
-                        cpu_trace[cpu].rw.value = (*ip) << 16 |
-                                *(ip + 1) << 8 |
-                                *(ip + 2);
+                        my_trace->rw.value = (*ip) << 16 | *(ip + 1) << 8 |
+                                                                *(ip + 2);
                 }
         }
+        put_cpu_var(cpu_trace);
+        put_cpu_var(pf_reason);
 }
 
 static void post(struct kmmio_probe *p, unsigned long condition,
                 struct pt_regs *regs)
 {
-        const unsigned long cpu = smp_processor_id();
+        struct trap_reason *my_reason = &get_cpu_var(pf_reason);
+        struct mm_io_header_rw *my_trace = &get_cpu_var(cpu_trace);
 
         /* this should always return the active_trace count to 0 */
-        pf_reason[cpu].active_traces--;
-        if (pf_reason[cpu].active_traces) {
+        my_reason->active_traces--;
+        if (my_reason->active_traces) {
                 printk(KERN_EMERG MODULE_NAME ": unexpected post handler");
                 BUG();
         }
 
-        switch (pf_reason[cpu].type) {
+        switch (my_reason->type) {
         case REG_READ:
-                cpu_trace[cpu].rw.value = get_ins_reg_val(pf_reason[cpu].ip,
-                                regs);
+                my_trace->rw.value = get_ins_reg_val(my_reason->ip, regs);
                 break;
         default:
                 break;
         }
-        relay_write(chan, &cpu_trace[cpu], sizeof(struct mm_io_header_rw));
+        relay_write(chan, my_trace, sizeof(*my_trace));
+        put_cpu_var(cpu_trace);
+        put_cpu_var(pf_reason);
 }
 
 /*
@@ -274,7 +280,7 @@ static int subbuf_start_handler(struct rchan_buf *buf, void *subbuf,
                                         void *prev_subbuf, size_t prev_padding)
 {
         unsigned int cpu = buf->cpu;
-        atomic_t *drop = &dropped[cpu];
+        atomic_t *drop = &per_cpu(dropped, cpu);
         int count;
         if (relay_buf_full(buf)) {
                 if (atomic_inc_return(drop) == 1) {