author	Pekka Paalanen <pq@iki.fi>	2009-03-01 09:11:58 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-03-02 04:20:36 -0500
commit	5359b585fb5edb3db34d6cd491e1475b098c61d3 (patch)
tree	c7b743a28a0561977777e8d30ef64ba70b745ef2 /arch/x86/mm/kmmio.c
parent	e9d54cae8f03e7f963a12f44bd50d68f49b9ea36 (diff)
x86 mmiotrace: fix save/restore page table state
From baa99e2b32449ec7bf147c234adfa444caecac8a Mon Sep 17 00:00:00 2001
From: Pekka Paalanen <pq@iki.fi>
Date: Sun, 22 Feb 2009 20:02:43 +0200
Blindly setting _PAGE_PRESENT in disarm_kmmio_fault_page() overlooks the
possibility that the page was not present when it was armed.
Make arm_kmmio_fault_page() store the previous page presence in struct
kmmio_fault_page and use it on disarm.
This patch was originally written by Stuart Bennett, but Pekka Paalanen
rewrote it a little differently.
Signed-off-by: Pekka Paalanen <pq@iki.fi>
Cc: Stuart Bennett <stuart@freedesktop.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
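
[Editor's note] For illustration, here is a minimal stand-alone C sketch of
the save/restore idea the patch introduces. It is not kernel code: the page
table entry is simulated by a plain integer, and the names (fake_fault_page,
FAKE_PAGE_PRESENT, set_presence) are invented for the example; only the
arm/disarm control flow mirrors the patched kmmio.c.

	/*
	 * Minimal userspace sketch of the save/restore idea in this patch.
	 * Kernel specifics (lookup_address, set_pmd, set_pte_atomic, ...)
	 * are simulated with a plain integer "pteval"; only the arm/disarm
	 * logic mirrors the patched kmmio.c.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define FAKE_PAGE_PRESENT 0x1UL

	struct fake_fault_page {
		unsigned long pteval;	/* simulated page table entry */
		bool old_presence;	/* page presence prior to arming */
		bool armed;
	};

	/* Set or clear the presence bit, saving the previous state in *old. */
	static void set_presence(unsigned long *pteval, bool present, bool *old)
	{
		*old = *pteval & FAKE_PAGE_PRESENT;
		if (present)
			*pteval |= FAKE_PAGE_PRESENT;
		else
			*pteval &= ~FAKE_PAGE_PRESENT;
	}

	static void arm(struct fake_fault_page *f)
	{
		/* Remember the prior presence so disarm can restore it. */
		set_presence(&f->pteval, false, &f->old_presence);
		f->armed = true;
	}

	static void disarm(struct fake_fault_page *f)
	{
		bool tmp;
		/* Restore the saved state instead of blindly marking present. */
		set_presence(&f->pteval, f->old_presence, &tmp);
		f->armed = false;
	}

	int main(void)
	{
		struct fake_fault_page f = { .pteval = 0 };	/* not present */

		arm(&f);
		disarm(&f);
		/* Pre-patch logic would leave the bit set; the fix keeps it 0. */
		printf("present after disarm: %lu\n",
		       f.pteval & FAKE_PAGE_PRESENT);
		return 0;
	}

Armed while non-present, the old disarm would unconditionally set the
present bit (printing 1 here); restoring the saved presence keeps the page
non-present (printing 0).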
Diffstat (limited to 'arch/x86/mm/kmmio.c')
 arch/x86/mm/kmmio.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 44 insertions(+), 22 deletions(-)
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index fb1f11546fcd..be361eb828c8 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -32,6 +32,8 @@ struct kmmio_fault_page {
 	struct list_head list;
 	struct kmmio_fault_page *release_next;
 	unsigned long page; /* location of the fault page */
+	bool old_presence; /* page presence prior to arming */
+	bool armed;
 
 	/*
 	 * Number of times this page has been registered as a part
@@ -105,8 +107,7 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
 	return NULL;
 }
 
-static int set_page_present(unsigned long addr, bool present,
-							unsigned int *pglevel)
+static int set_page_presence(unsigned long addr, bool present, bool *old)
 {
 	pteval_t pteval;
 	pmdval_t pmdval;
@@ -119,20 +120,21 @@ static int set_page_present(unsigned long addr, bool present,
 		return -1;
 	}
 
-	if (pglevel)
-		*pglevel = level;
-
 	switch (level) {
 	case PG_LEVEL_2M:
 		pmd = (pmd_t *)pte;
-		pmdval = pmd_val(*pmd) & ~_PAGE_PRESENT;
+		pmdval = pmd_val(*pmd);
+		*old = !!(pmdval & _PAGE_PRESENT);
+		pmdval &= ~_PAGE_PRESENT;
 		if (present)
 			pmdval |= _PAGE_PRESENT;
 		set_pmd(pmd, __pmd(pmdval));
 		break;
 
 	case PG_LEVEL_4K:
-		pteval = pte_val(*pte) & ~_PAGE_PRESENT;
+		pteval = pte_val(*pte);
+		*old = !!(pteval & _PAGE_PRESENT);
+		pteval &= ~_PAGE_PRESENT;
 		if (present)
 			pteval |= _PAGE_PRESENT;
 		set_pte_atomic(pte, __pte(pteval));
@@ -148,19 +150,39 @@ static int set_page_present(unsigned long addr, bool present,
 	return 0;
 }
 
-/** Mark the given page as not present. Access to it will trigger a fault. */
-static int arm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/*
+ * Mark the given page as not present. Access to it will trigger a fault.
+ *
+ * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
+ * protection is ignored here. RCU read lock is assumed held, so the struct
+ * will not disappear unexpectedly. Furthermore, the caller must guarantee,
+ * that double arming the same virtual address (page) cannot occur.
+ *
+ * Double disarming on the other hand is allowed, and may occur when a fault
+ * and mmiotrace shutdown happen simultaneously.
+ */
+static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-	int ret = set_page_present(page & PAGE_MASK, false, pglevel);
-	WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", page);
+	int ret;
+	WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
+	if (f->armed) {
+		pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
+					f->page, f->count, f->old_presence);
+	}
+	ret = set_page_presence(f->page, false, &f->old_presence);
+	WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
+	f->armed = true;
 	return ret;
 }
 
-/** Mark the given page as present. */
+/** Restore the given page to saved presence state. */
-static void disarm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-	int ret = set_page_present(page & PAGE_MASK, true, pglevel);
-	WARN_ONCE(ret < 0, KERN_ERR "kmmio disarming 0x%08lx failed.\n", page);
+	bool tmp;
+	int ret = set_page_presence(f->page, f->old_presence, &tmp);
+	WARN_ONCE(ret < 0,
+			KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
+	f->armed = false;
 }
 
 /*
@@ -207,7 +229,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 
 	ctx = &get_cpu_var(kmmio_ctx);
 	if (ctx->active) {
-		disarm_kmmio_fault_page(faultpage->page, NULL);
+		disarm_kmmio_fault_page(faultpage);
 		if (addr == ctx->addr) {
 			/*
 			 * On SMP we sometimes get recursive probe hits on the
@@ -249,7 +271,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 	regs->flags &= ~X86_EFLAGS_IF;
 
 	/* Now we set present bit in PTE and single step. */
-	disarm_kmmio_fault_page(ctx->fpage->page, NULL);
+	disarm_kmmio_fault_page(ctx->fpage);
 
 	/*
 	 * If another cpu accesses the same page while we are stepping,
@@ -288,7 +310,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	if (ctx->probe && ctx->probe->post_handler)
 		ctx->probe->post_handler(ctx->probe, condition, regs);
 
-	arm_kmmio_fault_page(ctx->fpage->page, NULL);
+	arm_kmmio_fault_page(ctx->fpage);
 
 	regs->flags &= ~X86_EFLAGS_TF;
 	regs->flags |= ctx->saved_flags;
@@ -320,19 +342,19 @@ static int add_kmmio_fault_page(unsigned long page)
 	f = get_kmmio_fault_page(page);
 	if (f) {
 		if (!f->count)
-			arm_kmmio_fault_page(f->page, NULL);
+			arm_kmmio_fault_page(f);
 		f->count++;
 		return 0;
 	}
 
-	f = kmalloc(sizeof(*f), GFP_ATOMIC);
+	f = kzalloc(sizeof(*f), GFP_ATOMIC);
 	if (!f)
 		return -1;
 
 	f->count = 1;
 	f->page = page;
 
-	if (arm_kmmio_fault_page(f->page, NULL)) {
+	if (arm_kmmio_fault_page(f)) {
 		kfree(f);
 		return -1;
 	}
@@ -356,7 +378,7 @@ static void release_kmmio_fault_page(unsigned long page,
 	f->count--;
 	BUG_ON(f->count < 0);
 	if (!f->count) {
-		disarm_kmmio_fault_page(f->page, NULL);
+		disarm_kmmio_fault_page(f);
 		f->release_next = *release_list;
 		*release_list = f;
 	}