aboutsummaryrefslogtreecommitdiffstats
path: root/include/asm-i386/pgtable.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/asm-i386/pgtable.h')
-rw-r--r--include/asm-i386/pgtable.h45
1 files changed, 22 insertions, 23 deletions
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index 64140f2f1b95..541b3e234335 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -246,6 +246,23 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
246 246	# include <asm/pgtable-2level.h>
247 247	#endif
248 248	
249/*
250 * We only update the dirty/accessed state if we set
251 * the dirty bit by hand in the kernel, since the hardware
252 * will do the accessed bit for us, and we don't want to
253 * race with other CPU's that might be updating the dirty
254 * bit at the same time.
255 */
256#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
257#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
258do { \
259 if (dirty) { \
260 (ptep)->pte_low = (entry).pte_low; \
261 flush_tlb_page(vma, address); \
262 } \
263} while (0)
264
265#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
249 266	static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
250 267	{
251 268		if (!pte_dirty(*ptep))
@@ -253,6 +270,7 @@ static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned
253 270		return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte_low);
254 271	}
255 272	
273#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
256 274	static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
257 275	{
258 276		if (!pte_young(*ptep))
@@ -260,6 +278,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
260 278		return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte_low);
261 279	}
262 280	
281#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
263 282	static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
264 283	{
265 284		pte_t pte;
@@ -272,6 +291,7 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
272 291		return pte;
273 292	}
274 293	
294#define __HAVE_ARCH_PTEP_SET_WRPROTECT
275 295	static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
276 296	{
277 297		clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
@@ -364,11 +384,11 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
364 384	#define pte_index(address) \
365 385			(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
366 386	#define pte_offset_kernel(dir, address) \
-367		((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
+387		((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
368 388	
369 389	#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
370 390	
-371	#define pmd_page_kernel(pmd) \
+391	#define pmd_page_vaddr(pmd) \
372 392			((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
373 393	
374 394	/*
@@ -409,23 +429,8 @@ extern pte_t *lookup_address(unsigned long address);
409 429	/*
410 430	 * The i386 doesn't have any external MMU info: the kernel page
411 431	 * tables contain all the necessary information.
412 *
413 * Also, we only update the dirty/accessed state if we set
414 * the dirty bit by hand in the kernel, since the hardware
415 * will do the accessed bit for us, and we don't want to
416 * race with other CPU's that might be updating the dirty
417 * bit at the same time.
418 432	 */
419 433	#define update_mmu_cache(vma,address,pte) do { } while (0)
420#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
421#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
422 do { \
423 if (__dirty) { \
424 (__ptep)->pte_low = (__entry).pte_low; \
425 flush_tlb_page(__vma, __address); \
426 } \
427 } while (0)
428
429 434	#endif /* !__ASSEMBLY__ */
430 435	
431 436	#ifdef CONFIG_FLATMEM
@@ -439,12 +444,6 @@ extern pte_t *lookup_address(unsigned long address);
439 444	#define GET_IOSPACE(pfn)	0
440 445	#define GET_PFN(pfn)		(pfn)
441 446	
442#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
443#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
444#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
445#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
446#define __HAVE_ARCH_PTEP_SET_WRPROTECT
447#define __HAVE_ARCH_PTE_SAME
448 447	#include <asm-generic/pgtable.h>
449 448	
450 449	#endif /* _I386_PGTABLE_H */