Diffstat (limited to 'include/asm-i386/pgtable.h')

 include/asm-i386/pgtable.h | 80
 1 file changed, 67 insertions(+), 13 deletions(-)
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 541b3e234335..7d398f493dde 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -247,6 +247,23 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 #endif
 
 /*
+ * Rules for using pte_update - it must be called after any PTE update which
+ * has not been done using the set_pte / clear_pte interfaces. It is used by
+ * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
+ * updates should either be sets, clears, or set_pte_atomic for P->P
+ * transitions, which means this hook should only be called for user PTEs.
+ * This hook implies a P->P protection or access change has taken place, which
+ * requires a subsequent TLB flush. The notification can optionally be delayed
+ * until the TLB flush event by using the pte_update_defer form of the
+ * interface, but care must be taken to assure that the flush happens while
+ * still holding the same page table lock so that the shadow and primary pages
+ * do not become out of sync on SMP.
+ */
+#define pte_update(mm, addr, ptep)		do { } while (0)
+#define pte_update_defer(mm, addr, ptep)	do { } while (0)
+
+
+/*
  * We only update the dirty/accessed state if we set
  * the dirty bit by hand in the kernel, since the hardware
  * will do the accessed bit for us, and we don't want to
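
To make the rule in the new comment concrete: any raw, in-place modification of a user PTE that bypasses set_pte / pte_clear must be followed by pte_update (or its deferred form) and then a TLB flush, all while the same page table lock is held. Below is a minimal sketch of a conforming caller; example_pte_clear_dirty is a hypothetical helper, not part of this patch:

/* Hypothetical caller (illustration only): clear the dirty bit on a
 * user PTE in place - a P->P change - then notify any shadow-mode
 * hypervisor and flush, all before the page table lock is dropped. */
static inline void example_pte_clear_dirty(struct vm_area_struct *vma,
					   unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);	/* raw in-place update */
	pte_update(vma->vm_mm, addr, ptep);		/* resync shadow PTEs */
	flush_tlb_page(vma, addr);			/* flush before unlock */
}
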
@@ -258,25 +275,54 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
 do {									\
 	if (dirty) {							\
 		(ptep)->pte_low = (entry).pte_low;			\
+		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
 		flush_tlb_page(vma, address);				\
 	}								\
 } while (0)
 
+/*
+ * We don't actually have these, but we want to advertise them so that
+ * we can encompass the flush here.
+ */
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
-static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-	if (!pte_dirty(*ptep))
-		return 0;
-	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
-}
-
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-	if (!pte_young(*ptep))
-		return 0;
-	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
-}
+
+/*
+ * Rules for using ptep_establish: the pte MUST be a user pte, and
+ * must be a present->present transition.
+ */
+#define __HAVE_ARCH_PTEP_ESTABLISH
+#define ptep_establish(vma, address, ptep, pteval)			\
+do {									\
+	set_pte_present((vma)->vm_mm, address, ptep, pteval);		\
+	flush_tlb_page(vma, address);					\
+} while (0)
+
+#define __HAVE_ARCH_PTEP_CLEAR_DIRTY_FLUSH
+#define ptep_clear_flush_dirty(vma, address, ptep)			\
+({									\
+	int __dirty;							\
+	__dirty = pte_dirty(*(ptep));					\
+	if (__dirty) {							\
+		clear_bit(_PAGE_BIT_DIRTY, &(ptep)->pte_low);		\
+		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
+		flush_tlb_page(vma, address);				\
+	}								\
+	__dirty;							\
+})
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(vma, address, ptep)			\
+({									\
+	int __young;							\
+	__young = pte_young(*(ptep));					\
+	if (__young) {							\
+		clear_bit(_PAGE_BIT_ACCESSED, &(ptep)->pte_low);	\
+		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
+		flush_tlb_page(vma, address);				\
+	}								\
+	__young;							\
+})
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
 static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
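
The new flush macros use GCC statement expressions, ({ ... }), so they can both return whether the bit was set and keep the deferred hypervisor notification paired with the TLB flush under one page table lock; ptep_establish likewise bundles the present->present update with its flush. A rough sketch of the kind of consumer this serves follows; example_page_referenced is hypothetical and simpler than the real reclaim code:

/* Hypothetical, simplified consumer: report whether a user mapping was
 * referenced since the last scan.  The deferred pte_update and the TLB
 * flush both happen inside the macro, before the caller unlocks. */
static inline int example_page_referenced(struct vm_area_struct *vma,
					  unsigned long addr, pte_t *ptep)
{
	/* Returns 1 if the accessed bit was set; the bit is now clear
	 * and the stale TLB entry for addr has been flushed. */
	return ptep_clear_flush_young(vma, addr, ptep);
}
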
@@ -295,6 +341,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
 	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
+	pte_update(mm, addr, ptep);
 }
 
 /*
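
Note that ptep_set_wrprotect gets the immediate pte_update rather than pte_update_defer, consistent with the rules above: its caller, presumably the fork path, flushes the TLB later and may no longer hold the same page table lock, so the notification cannot safely ride on that flush. Where a flush does immediately follow under the same lock, the deferred form is the cheaper choice; example_wrprotect_flush is a hypothetical sketch of that contrasting case:

/* Hypothetical contrast to ptep_set_wrprotect: here a flush follows
 * immediately under the same page table lock, so the deferred
 * notification form is safe. */
static inline void example_wrprotect_flush(struct vm_area_struct *vma,
					   unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
	pte_update_defer(vma->vm_mm, addr, ptep);	/* defer to the flush... */
	flush_tlb_page(vma, addr);			/* ...which happens here */
}
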
@@ -426,6 +473,13 @@ extern pte_t *lookup_address(unsigned long address);
 #define pte_unmap_nested(pte) do { } while (0)
 #endif
 
+/* Clear a kernel PTE and flush it from the TLB */
+#define kpte_clear_flush(ptep, vaddr)					\
+do {									\
+	pte_clear(&init_mm, vaddr, ptep);				\
+	__flush_tlb_one(vaddr);						\
+} while (0)
+
 /*
  * The i386 doesn't have any external MMU info: the kernel page
  * tables contain all the necessary information.
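
kpte_clear_flush, added above, packages the common teardown for short-lived kernel mappings: clear the PTE in init_mm and invalidate only that one kernel virtual address rather than the whole TLB. A sketch of the fixmap-style pattern it targets; example_kunmap_slot and its parameters are illustrative, loosely modeled on the kunmap_atomic path:

/* Hypothetical user: tear down a temporary kernel mapping at a fixmap
 * slot.  pte_clear() runs against init_mm (kernel page tables) and
 * __flush_tlb_one() invalidates just this virtual address. */
static inline void example_kunmap_slot(pte_t *kmap_pte, unsigned long vaddr)
{
	kpte_clear_flush(kmap_pte, vaddr);
}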