-rw-r--r--  arch/i386/mm/init.c        |  1
-rw-r--r--  include/asm-i386/pgtable.h | 21
2 files changed, 22 insertions, 0 deletions
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 4a5a914b3432..90089c14c23d 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -493,6 +493,7 @@ int __init set_kernel_exec(unsigned long vaddr, int enable)
                 pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
         else
                 pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
+        pte_update_defer(&init_mm, vaddr, pte);
         __flush_tlb_all();
 out:
         return ret;
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 8cb708a6bed0..7d398f493dde 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -247,6 +247,23 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 #endif
 
 /*
+ * Rules for using pte_update - it must be called after any PTE update which
+ * has not been done using the set_pte / clear_pte interfaces. It is used by
+ * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
+ * updates should either be sets, clears, or set_pte_atomic for P->P
+ * transitions, which means this hook should only be called for user PTEs.
+ * This hook implies a P->P protection or access change has taken place, which
+ * requires a subsequent TLB flush. The notification can optionally be delayed
+ * until the TLB flush event by using the pte_update_defer form of the
+ * interface, but care must be taken to assure that the flush happens while
+ * still holding the same page table lock so that the shadow and primary pages
+ * do not become out of sync on SMP.
+ */
+#define pte_update(mm, addr, ptep)		do { } while (0)
+#define pte_update_defer(mm, addr, ptep)	do { } while (0)
+
+
+/*
  * We only update the dirty/accessed state if we set
  * the dirty bit by hand in the kernel, since the hardware
  * will do the accessed bit for us, and we don't want to
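
The comment added above describes a calling discipline rather than an implementation. A minimal caller-side sketch of that discipline, for a raw protection change to a present user PTE, follows; the function name example_wrprotect_user_pte() and the explicit locking are illustrative assumptions, not part of this patch.

/* Sketch only; not from this patch. */
static void example_wrprotect_user_pte(struct vm_area_struct *vma,
                                       unsigned long addr, pte_t *ptep)
{
        struct mm_struct *mm = vma->vm_mm;

        spin_lock(&mm->page_table_lock);
        /* Raw bit update that bypasses set_pte()/pte_clear()... */
        clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
        /* ...so the hypervisor hook must be notified. */
        pte_update_defer(mm, addr, ptep);
        /*
         * The deferred notification is only delivered at TLB flush time,
         * so the flush must happen before the page table lock is dropped.
         */
        flush_tlb_page(vma, addr);
        spin_unlock(&mm->page_table_lock);
}

Using pte_update() instead would deliver the notification immediately, as the ptep_set_wrprotect() change in the last hunk of this patch does.
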
@@ -258,6 +275,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 do { \
         if (dirty) { \
                 (ptep)->pte_low = (entry).pte_low; \
+                pte_update_defer((vma)->vm_mm, (addr), (ptep)); \
                 flush_tlb_page(vma, address); \
         } \
 } while (0)
@@ -287,6 +305,7 @@ do { \
         __dirty = pte_dirty(*(ptep)); \
         if (__dirty) { \
                 clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low); \
+                pte_update_defer((vma)->vm_mm, (addr), (ptep)); \
                 flush_tlb_page(vma, address); \
         } \
         __dirty; \
@@ -299,6 +318,7 @@ do { \
         __young = pte_young(*(ptep)); \
         if (__young) { \
                 clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low); \
+                pte_update_defer((vma)->vm_mm, (addr), (ptep)); \
                 flush_tlb_page(vma, address); \
         } \
         __young; \
@@ -321,6 +341,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
+        pte_update(mm, addr, ptep);
 }
 
 /*
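
Both hooks expand to no-ops in this patch, so behaviour on bare hardware is unchanged. Purely as an illustration of what the comment means by a shadow mode hypervisor resynchronizing its page tables, a paravirtualized configuration could define the hooks along these lines; shadow_sync_pte() and shadow_queue_pte() are hypothetical helpers, not an interface provided by this patch or by any particular hypervisor.

/*
 * Illustrative only: a hypothetical shadow-paging backend.
 */
#define pte_update(mm, addr, ptep) \
        shadow_sync_pte((mm), (addr), (ptep))   /* resynchronize the shadow PTE now */
#define pte_update_defer(mm, addr, ptep) \
        shadow_queue_pte((mm), (addr), (ptep))  /* resynchronize at the next TLB flush */

The matching TLB flush paths would then drain the queued updates into the shadow page tables, which is why the comment requires the flush to happen while the same page table lock is still held.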