author		Pekka Paalanen <pq@iki.fi>	2008-05-12 15:20:58 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-05-24 05:25:16 -0400
commit		138295373ccf7625fcb0218dfea114837983bc39 (patch)
tree		3d4afbb0d976fa41435fd5806ff0338aa1f15567 /arch/x86/kernel/mmiotrace/kmmio.c
parent		bd8ac686c73c7e925fcfe0b02dc4e7b947127864 (diff)
ftrace: mmiotrace update, #2
Another weekend, another patch. This should apply on top of my previous patch
from March 23rd.
Summary of changes:
- Print the PCI device list in the output header
- Work around recursive probe hits on SMP
- Refactor dis/arm_kmmio_fault_page() and add a check for page levels
- Remove un/reference_kmmio(); the die notifier hook is now registered
permanently into the list (see the usage sketch below)
- Explicitly check for single-stepping in the die notifier callback
I have tested this version on my UP Athlon64 desktop with Nouveau, and on an
SMP Core 2 Duo laptop with the proprietary nvidia driver. Both systems
are 64-bit. One previously unknown bug crept into daylight: the ftrace
framework's output routines print the first entry last after the buffer has
wrapped around.
The most important regressions compared to non-ftrace mmiotrace at this
time are:
- failure of the trace_pipe file
- illegal lines in the output file
- no indication of lost data when the buffer fills up
Personally, I'd like to see these three solved before submitting to
mainline. Other issues may come up once we know when we lose events.
Signed-off-by: Pekka Paalanen <pq@iki.fi>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
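
[With un/reference_kmmio() gone, kmmio sets itself up at boot and a client only
registers and unregisters probes. A minimal sketch of what a caller looks like
after this patch; the handler signatures and struct kmmio_probe fields follow
include/linux/mmiotrace.h as of this series, but treat the snippet as
illustrative rather than as code from mmio-mod.c:

	static void my_pre(struct kmmio_probe *p, struct pt_regs *regs,
							unsigned long addr)
	{
		/* called before the faulting instruction is single-stepped */
	}

	static void my_post(struct kmmio_probe *p, unsigned long condition,
							struct pt_regs *regs)
	{
		/* called after the instruction has executed */
	}

	static struct kmmio_probe my_probe = {
		.addr		= 0,	/* fill in: ioremapped address to watch */
		.len		= PAGE_SIZE,
		.pre_handler	= my_pre,
		.post_handler	= my_post,
	};

	/* before this patch: reference_kmmio(); register_kmmio_probe(&my_probe); */
	register_kmmio_probe(&my_probe);	/* now the only call needed */
	...
	unregister_kmmio_probe(&my_probe);	/* no unreference_kmmio() anymore */
]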
Diffstat (limited to 'arch/x86/kernel/mmiotrace/kmmio.c')
-rw-r--r--	arch/x86/kernel/mmiotrace/kmmio.c	186
1 file changed, 76 insertions(+), 110 deletions(-)
diff --git a/arch/x86/kernel/mmiotrace/kmmio.c b/arch/x86/kernel/mmiotrace/kmmio.c
index efb467933087..cd0d95fe4fe6 100644
--- a/arch/x86/kernel/mmiotrace/kmmio.c
+++ b/arch/x86/kernel/mmiotrace/kmmio.c
@@ -5,15 +5,12 @@
  * 2008 Pekka Paalanen <pq@iki.fi>
  */
 
-#include <linux/version.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/hash.h>
 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/slab.h>
 #include <linux/kernel.h>
-#include <linux/mm.h>
 #include <linux/uaccess.h>
 #include <linux/ptrace.h>
 #include <linux/preempt.h>
@@ -22,10 +19,9 @@
 #include <linux/mutex.h>
 #include <asm/io.h>
 #include <asm/cacheflush.h>
-#include <asm/errno.h>
 #include <asm/tlbflush.h>
-#include <asm/pgtable.h>
-
+#include <asm/errno.h>
+#include <asm/debugreg.h>
 #include <linux/mmiotrace.h>
 
 #define KMMIO_PAGE_HASH_BITS 4
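
[The new <asm/debugreg.h> include supplies DR_STEP, the single-step (BS) status
bit of debug register DR6. It backs the last change in this patch: the die
notifier now reacts only to DIE_DEBUG events whose error value (the saved DR6,
as passed along by do_debug) has BS set, so unrelated #DB causes such as
hardware breakpoints are no longer fed into post_kmmio_handler():

	/* from the die notifier change at the end of this diff */
	if (val == DIE_DEBUG && (arg->err & DR_STEP))
		if (post_kmmio_handler(arg->err, arg->regs) == 1)
			return NOTIFY_STOP;
]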
@@ -57,14 +53,9 @@ struct kmmio_context {
 	int active;
 };
 
-static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
-								void *args);
-
-static DEFINE_MUTEX(kmmio_init_mutex);
 static DEFINE_SPINLOCK(kmmio_lock);
 
-/* These are protected by kmmio_lock */
-static int kmmio_initialized;
+/* Protected by kmmio_lock */
 unsigned int kmmio_count;
 
 /* Read-protected by RCU, write-protected by kmmio_lock. */
@@ -79,60 +70,6 @@ static struct list_head *kmmio_page_list(unsigned long page)
 /* Accessed per-cpu */
 static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);
 
-/* protected by kmmio_init_mutex */
-static struct notifier_block nb_die = {
-	.notifier_call = kmmio_die_notifier
-};
-
-/**
- * Makes sure kmmio is initialized and usable.
- * This must be called before any other kmmio function defined here.
- * May sleep.
- */
-void reference_kmmio(void)
-{
-	mutex_lock(&kmmio_init_mutex);
-	spin_lock_irq(&kmmio_lock);
-	if (!kmmio_initialized) {
-		int i;
-		for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
-			INIT_LIST_HEAD(&kmmio_page_table[i]);
-		if (register_die_notifier(&nb_die))
-			BUG();
-	}
-	kmmio_initialized++;
-	spin_unlock_irq(&kmmio_lock);
-	mutex_unlock(&kmmio_init_mutex);
-}
-EXPORT_SYMBOL_GPL(reference_kmmio);
-
-/**
- * Clean up kmmio after use. This must be called for every call to
- * reference_kmmio(). All probes registered after the corresponding
- * reference_kmmio() must have been unregistered when calling this.
- * May sleep.
- */
-void unreference_kmmio(void)
-{
-	bool unreg = false;
-
-	mutex_lock(&kmmio_init_mutex);
-	spin_lock_irq(&kmmio_lock);
-
-	if (kmmio_initialized == 1) {
-		BUG_ON(is_kmmio_active());
-		unreg = true;
-	}
-	kmmio_initialized--;
-	BUG_ON(kmmio_initialized < 0);
-	spin_unlock_irq(&kmmio_lock);
-
-	if (unreg)
-		unregister_die_notifier(&nb_die); /* calls sync_rcu() */
-	mutex_unlock(&kmmio_init_mutex);
-}
-EXPORT_SYMBOL(unreference_kmmio);
-
 /*
  * this is basically a dynamic stabbing problem:
  * Could use the existing prio tree code or
@@ -167,58 +104,56 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
 	return NULL;
 }
 
-/** Mark the given page as not present. Access to it will trigger a fault. */
-static void arm_kmmio_fault_page(unsigned long page, int *page_level)
+static void set_page_present(unsigned long addr, bool present, int *pglevel)
 {
-	unsigned long address = page & PAGE_MASK;
+	pteval_t pteval;
+	pmdval_t pmdval;
 	int level;
-	pte_t *pte = lookup_address(address, &level);
+	pmd_t *pmd;
+	pte_t *pte = lookup_address(addr, &level);
 
 	if (!pte) {
-		pr_err("kmmio: Error in %s: no pte for page 0x%08lx\n",
-							__func__, page);
+		pr_err("kmmio: no pte for page 0x%08lx\n", addr);
 		return;
 	}
 
-	if (level == PG_LEVEL_2M) {
-		pmd_t *pmd = (pmd_t *)pte;
-		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_PRESENT));
-	} else {
-		/* PG_LEVEL_4K */
-		set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+	if (pglevel)
+		*pglevel = level;
+
+	switch (level) {
+	case PG_LEVEL_2M:
+		pmd = (pmd_t *)pte;
+		pmdval = pmd_val(*pmd) & ~_PAGE_PRESENT;
+		if (present)
+			pmdval |= _PAGE_PRESENT;
+		set_pmd(pmd, __pmd(pmdval));
+		break;
+
+	case PG_LEVEL_4K:
+		pteval = pte_val(*pte) & ~_PAGE_PRESENT;
+		if (present)
+			pteval |= _PAGE_PRESENT;
+		set_pte_atomic(pte, __pte(pteval));
+		break;
+
+	default:
+		pr_err("kmmio: unexpected page level 0x%x.\n", level);
+		return;
 	}
 
-	if (page_level)
-		*page_level = level;
+	__flush_tlb_one(addr);
+}
 
-	__flush_tlb_one(page);
+/** Mark the given page as not present. Access to it will trigger a fault. */
+static void arm_kmmio_fault_page(unsigned long page, int *page_level)
+{
+	set_page_present(page & PAGE_MASK, false, page_level);
 }
 
 /** Mark the given page as present. */
 static void disarm_kmmio_fault_page(unsigned long page, int *page_level)
 {
-	unsigned long address = page & PAGE_MASK;
-	int level;
-	pte_t *pte = lookup_address(address, &level);
-
-	if (!pte) {
-		pr_err("kmmio: Error in %s: no pte for page 0x%08lx\n",
-							__func__, page);
-		return;
-	}
-
-	if (level == PG_LEVEL_2M) {
-		pmd_t *pmd = (pmd_t *)pte;
-		set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_PRESENT));
-	} else {
-		/* PG_LEVEL_4K */
-		set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
-	}
-
-	if (page_level)
-		*page_level = level;
-
-	__flush_tlb_one(page);
+	set_page_present(page & PAGE_MASK, true, page_level);
 }
 
 /*
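
[The arm/disarm pair above is now a pair of one-line wrappers over the shared
set_page_present(), which reports the page level back through an optional
out-pointer and rejects unexpected levels (e.g. a 1G page) instead of silently
treating them as 4K. Note also that the 4K path switched from set_pte() to
set_pte_atomic(), presumably so a 64-bit PTE update cannot be observed
half-written under PAE. A hypothetical call site, for illustration only:

	int level;

	/* hide the page: accesses to it now fault into kmmio_handler() */
	arm_kmmio_fault_page(f->page, &level);
	...
	/* make it visible again so the instruction can be single-stepped */
	disarm_kmmio_fault_page(f->page, NULL);
]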
@@ -240,6 +175,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 {
 	struct kmmio_context *ctx;
 	struct kmmio_fault_page *faultpage;
+	int ret = 0; /* default to fault not handled */
 
 	/*
 	 * Preemption is now disabled to prevent process switch during
@@ -257,21 +193,35 @@
 		/*
 		 * Either this page fault is not caused by kmmio, or
 		 * another CPU just pulled the kmmio probe from under
-		 * our feet. In the latter case all hell breaks loose.
+		 * our feet. The latter case should not be possible.
 		 */
 		goto no_kmmio;
 	}
 
 	ctx = &get_cpu_var(kmmio_ctx);
 	if (ctx->active) {
+		disarm_kmmio_fault_page(faultpage->page, NULL);
+		if (addr == ctx->addr) {
+			/*
+			 * On SMP we sometimes get recursive probe hits on the
+			 * same address. Context is already saved, fall out.
+			 */
+			pr_debug("kmmio: duplicate probe hit on CPU %d, for "
+						"address 0x%08lx.\n",
+						smp_processor_id(), addr);
+			ret = 1;
+			goto no_kmmio_ctx;
+		}
 		/*
 		 * Prevent overwriting already in-flight context.
-		 * If this page fault really was due to kmmio trap,
-		 * all hell breaks loose.
+		 * This should not happen, let's hope disarming at least
+		 * prevents a panic.
 		 */
 		pr_emerg("kmmio: recursive probe hit on CPU %d, "
 					"for address 0x%08lx. Ignoring.\n",
 					smp_processor_id(), addr);
+		pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
+					ctx->addr);
 		goto no_kmmio_ctx;
 	}
 	ctx->active++;
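
[For the record, the duplicate-hit workaround above addresses a cross-CPU race,
roughly as follows (my reading of the patch, not text from it): CPU A faults on
an armed page, disarms it, saves its context and begins single-stepping;
meanwhile CPU B finishes its own hit on the same page and re-arms it, so CPU
A's re-executed instruction faults again at the same address while A's context
is still active. As a timeline:

	/*
	 * CPU A					CPU B
	 * faults on armed page
	 * disarms page, saves ctx, sets TF
	 *						post-handler re-arms page
	 * re-executes instruction -> faults again
	 * ctx->active && addr == ctx->addr
	 * -> disarm again, report handled (ret = 1)
	 */
]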
@@ -302,14 +252,14 @@
 	 */
 
 	put_cpu_var(kmmio_ctx);
-	return 1;
+	return 1; /* fault handled */
 
 no_kmmio_ctx:
 	put_cpu_var(kmmio_ctx);
 no_kmmio:
 	rcu_read_unlock();
 	preempt_enable_no_resched();
-	return 0; /* page fault not handled by kmmio */
+	return ret;
 }
 
 /*
@@ -322,8 +272,11 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	int ret = 0;
 	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
 
-	if (!ctx->active)
+	if (!ctx->active) {
+		pr_debug("kmmio: spurious debug trap on CPU %d.\n",
+							smp_processor_id());
 		goto out;
+	}
 
 	if (ctx->probe && ctx->probe->post_handler)
 		ctx->probe->post_handler(ctx->probe, condition, regs);
@@ -525,9 +478,22 @@ static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
 {
 	struct die_args *arg = args;
 
-	if (val == DIE_DEBUG)
+	if (val == DIE_DEBUG && (arg->err & DR_STEP))
 		if (post_kmmio_handler(arg->err, arg->regs) == 1)
 			return NOTIFY_STOP;
 
 	return NOTIFY_DONE;
 }
+
+static struct notifier_block nb_die = {
+	.notifier_call = kmmio_die_notifier
+};
+
+static int __init init_kmmio(void)
+{
+	int i;
+	for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
+		INIT_LIST_HEAD(&kmmio_page_table[i]);
+	return register_die_notifier(&nb_die);
+}
+fs_initcall(init_kmmio); /* should be before device_initcall() */
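
[The fs_initcall() here encodes the ordering assumption in the trailing
comment: initcalls run in level order (fs_initcall is level 5,
device_initcall level 6, per include/linux/init.h), so the page hash table and
die notifier exist before any driver initcall can ioremap and register probes.
A sketch of the resulting ordering, with a hypothetical driver:

	fs_initcall(init_kmmio);		/* level 5: kmmio ready */
	device_initcall(my_driver_init);	/* level 6: may register kmmio probes */
]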