| author | Zachary Amsden <zach@vmware.com> | 2007-05-06 17:49:20 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-05-07 15:12:52 -0400 |
| commit | 0013572b2ae535bfd6314f22d9aef53725ea00d8 (patch) | |
| tree | 0c405dfe8a106099696ed9955b4405e6d7caed70 /include/asm-i386/pgtable.h | |
| parent | 10a8d6ae4b3182d6588a5809a8366343bc295c20 (diff) | |
i386: use pte_update_defer in ptep_test_and_clear_{dirty,young}
If you actually clear the bit, you need to:
+ pte_update_defer(vma->vm_mm, addr, ptep);
The reason is that, when updating PTEs, the hypervisor must be notified. Using
atomic operations to do this is fine for all hypervisors I am aware of.
However, for hypervisors which shadow page tables, if these PTE
modifications are not trapped, you need a post-modification call to propagate
the update into the shadow page table.
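To make that failure mode concrete, here is a minimal userspace sketch of the defer-and-notify pattern, assuming a hypothetical `shadow_sync()` batched hypercall and simplified stand-in types (none of these names are the kernel's or VMware's actual interface): the guest's atomic bit clear changes only its own page table, so the hypervisor's shadow copy stays stale until the deferred notification runs.

```c
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's pte_t -- not real kernel code. */
typedef struct { unsigned long pte_low; } pte_t;

#define QUEUE_LEN 8

/* PTE updates the shadowing hypervisor has not been told about yet. */
static pte_t *pending[QUEUE_LEN];
static size_t npending;

/* Hypothetical batched hypercall: resynchronize the shadow page table. */
static void shadow_sync(void)
{
	for (size_t i = 0; i < npending; i++)
		printf("resync shadow entry for pte %p\n", (void *)pending[i]);
	npending = 0;
}

/* Analogue of pte_update_defer(): record the change, flush when full. */
static void pte_update_defer_sketch(pte_t *ptep)
{
	pending[npending++] = ptep;
	if (npending == QUEUE_LEN)
		shadow_sync();
}

int main(void)
{
	pte_t pte = { .pte_low = 1UL << 6 };	/* bit 6 set, as _PAGE_BIT_DIRTY */

	/* The atomic clear updates only the guest's own PTE ... */
	pte.pte_low &= ~(1UL << 6);
	/* ... so the shadow copy is stale until the deferred call runs. */
	pte_update_defer_sketch(&pte);
	shadow_sync();
	return 0;
}
```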
Acked-by: Zachary Amsden <zach@vmware.com>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/asm-i386/pgtable.h')
| -rw-r--r-- | include/asm-i386/pgtable.h | 38 |
1 file changed, 18 insertions, 20 deletions
```diff
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 995e8b34efd0..e16359f81a40 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -297,22 +297,24 @@ do { \
 } while (0)
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma,
-                                            unsigned long addr, pte_t *ptep)
-{
-        if (!pte_dirty(*ptep))
-                return 0;
-        return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
-}
+#define ptep_test_and_clear_dirty(vma, addr, ptep) ({                  \
+        int ret = 0;                                                   \
+        if (pte_dirty(*ptep))                                          \
+                ret = test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low); \
+        if (ret)                                                       \
+                pte_update_defer(vma->vm_mm, addr, ptep);              \
+        ret;                                                           \
+})
 
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
-                                            unsigned long addr, pte_t *ptep)
-{
-        if (!pte_young(*ptep))
-                return 0;
-        return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
-}
+#define ptep_test_and_clear_young(vma, addr, ptep) ({                  \
+        int ret = 0;                                                   \
+        if (pte_young(*ptep))                                          \
+                ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low); \
+        if (ret)                                                       \
+                pte_update_defer(vma->vm_mm, addr, ptep);              \
+        ret;                                                           \
+})
 
 /*
  * Rules for using ptep_establish: the pte MUST be a user pte, and
@@ -330,10 +332,8 @@ do { \
 ({                                                                     \
         int __dirty;                                                   \
         __dirty = ptep_test_and_clear_dirty((vma), (address), (ptep)); \
-        if (__dirty) {                                                 \
-                pte_update_defer((vma)->vm_mm, (address), (ptep));     \
+        if (__dirty)                                                   \
                 flush_tlb_page(vma, address);                          \
-        }                                                              \
         __dirty;                                                       \
 })
 
@@ -342,10 +342,8 @@ do { \
 ({                                                                     \
         int __young;                                                   \
         __young = ptep_test_and_clear_young((vma), (address), (ptep)); \
-        if (__young) {                                                 \
-                pte_update_defer((vma)->vm_mm, (address), (ptep));     \
+        if (__young)                                                   \
                 flush_tlb_page(vma, address);                          \
-        }                                                              \
         __young;                                                       \
 })
 
```
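A side note on the `({ ... })` construct the replacement macros use: it is a GNU C statement expression, which evaluates to its last expression (the bare `ret;` above), letting a macro return a value the way the old inline functions did. A small standalone illustration (the `test_and_clear_flag` example is hypothetical, not from this patch):

```c
#include <stdio.h>

/* A GNU C statement expression evaluates to its last expression,
 * so a macro can behave like a function call with a return value. */
#define test_and_clear_flag(p) ({                                      \
        int __ret = (*(p) != 0);                                       \
        if (__ret)                                                     \
                *(p) = 0;                                              \
        __ret;  /* value of the whole ({ ... }) block */               \
})

int main(void)
{
        int flag = 1;

        printf("%d\n", test_and_clear_flag(&flag));     /* 1: was set, now cleared */
        printf("%d\n", test_and_clear_flag(&flag));     /* 0: already clear */
        return 0;
}
```

Statement expressions are a GNU extension, so this builds with gcc; they are used the same way throughout the i386 headers touched above.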