author    Zachary Amsden <zach@vmware.com>      2006-10-01 02:29:38 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>        2006-10-01 03:39:34 -0400
commit    789e6ac0a7cbbb38402293256a295302fd8a1100 (patch)
tree      65a1c946f72bdbc8226c2fab5882cc56b9da031e /include
parent    a93cb055a23f3172c1e6a22ac1dc4f1c07929b08 (diff)
[PATCH] paravirt: update pte hook
Add a pte_update hook which notifies about pte changes that have been made
without using the set_pte / clear_pte interfaces.  This allows shadow mode
hypervisors which do not trap on page table access to maintain synchronized
shadows.

It also turns out there was one pte update in PAE mode that wasn't using any
accessor interface at all for setting NX protection.  Considering it is PAE
specific, and the accessor is i386 specific, I didn't want to add a generic
encapsulation of this behavior yet.

Signed-off-by: Zachary Amsden <zach@vmware.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
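The hooks land as no-ops on native hardware (see the diff below).  For
illustration only, a minimal sketch of what a shadow-mode hypervisor port
might substitute for the no-op macros, assuming hypothetical
hv_sync_shadow_pte() / hv_queue_shadow_pte() notification calls -- neither
name appears in this patch or in any real hypervisor interface:

	/*
	 * Sketch only: possible hypervisor-side backends for the hooks.
	 * hv_sync_shadow_pte() and hv_queue_shadow_pte() are invented
	 * names used purely to show the shape of such a port.
	 */
	struct mm_struct;

	extern void hv_sync_shadow_pte(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep);
	extern void hv_queue_shadow_pte(struct mm_struct *mm,
					unsigned long addr, pte_t *ptep);

	/* Immediate form: re-sync the shadow entry right after the write. */
	#define pte_update(mm, addr, ptep)	hv_sync_shadow_pte(mm, addr, ptep)

	/*
	 * Deferred form: queue the notification; the hypervisor re-syncs
	 * at the TLB flush, which the caller must issue while still
	 * holding the same page table lock.
	 */
	#define pte_update_defer(mm, addr, ptep) hv_queue_shadow_pte(mm, addr, ptep)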
Diffstat (limited to 'include')
-rw-r--r--  include/asm-i386/pgtable.h | 21
1 file changed, 21 insertions, 0 deletions
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 8cb708a6bed0..7d398f493dde 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -247,6 +247,23 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 #endif
 
 /*
+ * Rules for using pte_update - it must be called after any PTE update which
+ * has not been done using the set_pte / clear_pte interfaces.  It is used by
+ * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
+ * updates should either be sets, clears, or set_pte_atomic for P->P
+ * transitions, which means this hook should only be called for user PTEs.
+ * This hook implies a P->P protection or access change has taken place, which
+ * requires a subsequent TLB flush.  The notification can optionally be delayed
+ * until the TLB flush event by using the pte_update_defer form of the
+ * interface, but care must be taken to assure that the flush happens while
+ * still holding the same page table lock so that the shadow and primary pages
+ * do not become out of sync on SMP.
+ */
+#define pte_update(mm, addr, ptep)		do { } while (0)
+#define pte_update_defer(mm, addr, ptep)	do { } while (0)
+
+
+/*
  * We only update the dirty/accessed state if we set
  * the dirty bit by hand in the kernel, since the hardware
  * will do the accessed bit for us, and we don't want to
@@ -258,6 +275,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 do {									\
 	if (dirty) {							\
 		(ptep)->pte_low = (entry).pte_low;			\
+		pte_update_defer((vma)->vm_mm, (addr), (ptep));		\
 		flush_tlb_page(vma, address);				\
 	}								\
 } while (0)
@@ -287,6 +305,7 @@ do { \
 	__dirty = pte_dirty(*(ptep));					\
 	if (__dirty) {							\
 		clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low);		\
+		pte_update_defer((vma)->vm_mm, (addr), (ptep));		\
 		flush_tlb_page(vma, address);				\
 	}								\
 	__dirty;							\
@@ -299,6 +318,7 @@ do { \
 	__young = pte_young(*(ptep));					\
 	if (__young) {							\
 		clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low);	\
+		pte_update_defer((vma)->vm_mm, (addr), (ptep));		\
 		flush_tlb_page(vma, address);				\
 	}								\
 	__young;							\
@@ -321,6 +341,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
+	pte_update(mm, addr, ptep);
 }
 
 /*
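To make the locking rule in the new comment concrete, an illustrative
caller pattern for the deferred form (hypothetical, not taken from the
patch): the TLB flush must happen under the same page table lock as the
notification, or the shadow and primary tables could diverge on SMP.

	/* Hypothetical caller pattern -- for illustration only. */
	spin_lock(&mm->page_table_lock);
	ptep->pte_low = entry.pte_low;		/* direct PTE word write */
	pte_update_defer(mm, addr, ptep);	/* queue shadow re-sync  */
	flush_tlb_page(vma, addr);		/* hypervisor syncs here */
	spin_unlock(&mm->page_table_lock);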