diff options
author | Jeremy Fitzhardinge <jeremy@goop.org> | 2008-01-30 07:32:58 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 07:32:58 -0500 |
commit | 195466dc4b9b8a4cc89d37ea1211746f3afbc941 (patch) | |
tree | 38a4dc9e105d54cf285cdcbc141b424a2fc16f41 /include/asm-x86/pgtable_64.h | |
parent | e33287013585e96180c575288bf1db22bee47b52 (diff) |
x86: pgtable: unify pte accessors
Make various pte accessors common.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'include/asm-x86/pgtable_64.h')
-rw-r--r-- | include/asm-x86/pgtable_64.h | 43 |
1 file changed, 4 insertions, 39 deletions
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h index 77038d8e9bfd..987f51f684a5 100644 --- a/include/asm-x86/pgtable_64.h +++ b/include/asm-x86/pgtable_64.h | |||
@@ -101,18 +101,18 @@ static inline void pgd_clear (pgd_t * pgd) | |||
101 | set_pgd(pgd, __pgd(0)); | 101 | set_pgd(pgd, __pgd(0)); |
102 | } | 102 | } |
103 | 103 | ||
104 | #define ptep_get_and_clear(mm,addr,xp) __pte(xchg(&(xp)->pte, 0)) | 104 | #define native_ptep_get_and_clear(xp) __pte(xchg(&(xp)->pte, 0)) |
105 | 105 | ||
106 | struct mm_struct; | 106 | struct mm_struct; |
107 | 107 | ||
108 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) | 108 | static inline pte_t native_ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full) |
109 | { | 109 | { |
110 | pte_t pte; | 110 | pte_t pte; |
111 | if (full) { | 111 | if (full) { |
112 | pte = *ptep; | 112 | pte = *ptep; |
113 | *ptep = __pte(0); | 113 | *ptep = __pte(0); |
114 | } else { | 114 | } else { |
115 | pte = ptep_get_and_clear(mm, addr, ptep); | 115 | pte = native_ptep_get_and_clear(ptep); |
116 | } | 116 | } |
117 | return pte; | 117 | return pte; |
118 | } | 118 | } |
@@ -158,26 +158,12 @@ static inline unsigned long pmd_bad(pmd_t pmd) | |||
158 | 158 | ||
159 | #define pte_none(x) (!pte_val(x)) | 159 | #define pte_none(x) (!pte_val(x)) |
160 | #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) | 160 | #define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE)) |
161 | #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) | 161 | #define native_pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) |
162 | 162 | ||
163 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? */ | 163 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) /* FIXME: is this right? */ |
164 | #define pte_page(x) pfn_to_page(pte_pfn(x)) | 164 | #define pte_page(x) pfn_to_page(pte_pfn(x)) |
165 | #define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) | 165 | #define pte_pfn(x) ((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT) |
166 | 166 | ||
167 | struct vm_area_struct; | ||
168 | |||
169 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) | ||
170 | { | ||
171 | if (!pte_young(*ptep)) | ||
172 | return 0; | ||
173 | return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte); | ||
174 | } | ||
175 | |||
176 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | ||
177 | { | ||
178 | clear_bit(_PAGE_BIT_RW, &ptep->pte); | ||
179 | } | ||
180 | |||
181 | /* | 167 | /* |
182 | * Macro to mark a page protection value as "uncacheable". | 168 | * Macro to mark a page protection value as "uncacheable". |
183 | */ | 169 | */ |
@@ -243,22 +229,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | |||
243 | 229 | ||
244 | #define update_mmu_cache(vma,address,pte) do { } while (0) | 230 | #define update_mmu_cache(vma,address,pte) do { } while (0) |
245 | 231 | ||
246 | /* We only update the dirty/accessed state if we set | ||
247 | * the dirty bit by hand in the kernel, since the hardware | ||
248 | * will do the accessed bit for us, and we don't want to | ||
249 | * race with other CPU's that might be updating the dirty | ||
250 | * bit at the same time. */ | ||
251 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | ||
252 | #define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \ | ||
253 | ({ \ | ||
254 | int __changed = !pte_same(*(__ptep), __entry); \ | ||
255 | if (__changed && __dirty) { \ | ||
256 | set_pte(__ptep, __entry); \ | ||
257 | flush_tlb_page(__vma, __address); \ | ||
258 | } \ | ||
259 | __changed; \ | ||
260 | }) | ||
261 | |||
262 | /* Encode and de-code a swap entry */ | 232 | /* Encode and de-code a swap entry */ |
263 | #define __swp_type(x) (((x).val >> 1) & 0x3f) | 233 | #define __swp_type(x) (((x).val >> 1) & 0x3f) |
264 | #define __swp_offset(x) ((x).val >> 8) | 234 | #define __swp_offset(x) ((x).val >> 8) |
@@ -290,12 +260,7 @@ pte_t *lookup_address(unsigned long addr); | |||
290 | #define kc_offset_to_vaddr(o) \ | 260 | #define kc_offset_to_vaddr(o) \ |
291 | (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o)) | 261 | (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o)) |
292 | 262 | ||
293 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | ||
294 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | ||
295 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | ||
296 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | ||
297 | #define __HAVE_ARCH_PTE_SAME | 263 | #define __HAVE_ARCH_PTE_SAME |
298 | #include <asm-generic/pgtable.h> | ||
299 | #endif /* !__ASSEMBLY__ */ | 264 | #endif /* !__ASSEMBLY__ */ |
300 | 265 | ||
301 | #endif /* _X86_64_PGTABLE_H */ | 266 | #endif /* _X86_64_PGTABLE_H */ |