aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author    Jeremy Fitzhardinge <jeremy@goop.org>  2008-01-30 07:32:58 -0500
committer Ingo Molnar <mingo@elte.hu>            2008-01-30 07:32:58 -0500
commit    195466dc4b9b8a4cc89d37ea1211746f3afbc941 (patch)
tree      38a4dc9e105d54cf285cdcbc141b424a2fc16f41
parent    e33287013585e96180c575288bf1db22bee47b52 (diff)
x86: pgtable: unify pte accessors
Make various pte accessors common.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 include/asm-x86/pgtable-2level.h |   1 -
 include/asm-x86/pgtable-3level.h |   1 -
 include/asm-x86/pgtable.h        | 110 ++++++++++++++++++++++++++++++++++++-
 include/asm-x86/pgtable_32.h     | 101 +----------------------------------
 include/asm-x86/pgtable_64.h     |  43 ++-------------
 5 files changed, 113 insertions(+), 143 deletions(-)
diff --git a/include/asm-x86/pgtable-2level.h b/include/asm-x86/pgtable-2level.h
index f949bb083089..65cdc8f9e3b3 100644
--- a/include/asm-x86/pgtable-2level.h
+++ b/include/asm-x86/pgtable-2level.h
@@ -33,7 +33,6 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 #define set_pte_atomic(pteptr, pteval) set_pte(pteptr,pteval)
 #define set_pte_present(mm,addr,ptep,pteval) set_pte_at(mm,addr,ptep,pteval)
 
-#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 #define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 
 static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *xp)
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index 3da96f792dd0..a123e687112f 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -101,7 +101,6 @@ static inline void native_pmd_clear(pmd_t *pmd)
 #define set_pte_atomic(ptep, pte)	native_set_pte_atomic(ptep, pte)
 #define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)
 #define set_pud(pudp, pud)		native_set_pud(pudp, pud)
-#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
 #define pmd_clear(pmd)			native_pmd_clear(pmd)
 #endif
 
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 6452286e71a1..62fa856c2491 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -116,6 +116,7 @@ extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 #define __S111	PAGE_SHARED_EXEC
 
 #ifndef __ASSEMBLY__
+
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
@@ -169,7 +170,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	return __pte(val);
 }
 
-
 #endif	/* __ASSEMBLY__ */
 
 #ifdef CONFIG_X86_32
@@ -178,4 +178,112 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 # include "pgtable_64.h"
 #endif
 
+#ifndef __ASSEMBLY__
+
+#ifndef CONFIG_PARAVIRT
+/*
+ * Rules for using pte_update - it must be called after any PTE update which
+ * has not been done using the set_pte / clear_pte interfaces.  It is used by
+ * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
+ * updates should either be sets, clears, or set_pte_atomic for P->P
+ * transitions, which means this hook should only be called for user PTEs.
+ * This hook implies a P->P protection or access change has taken place, which
+ * requires a subsequent TLB flush.  The notification can optionally be delayed
+ * until the TLB flush event by using the pte_update_defer form of the
+ * interface, but care must be taken to assure that the flush happens while
+ * still holding the same page table lock so that the shadow and primary pages
+ * do not become out of sync on SMP.
+ */
+#define pte_update(mm, addr, ptep)		do { } while (0)
+#define pte_update_defer(mm, addr, ptep)	do { } while (0)
+#endif
+
+/* local pte updates need not use xchg for locking */
+static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
+{
+	pte_t res = *ptep;
+
+	/* Pure native function needs no input for mm, addr */
+	native_pte_clear(NULL, 0, ptep);
+	return res;
+}
+
+/*
+ * We only update the dirty/accessed state if we set
+ * the dirty bit by hand in the kernel, since the hardware
+ * will do the accessed bit for us, and we don't want to
+ * race with other CPU's that might be updating the dirty
+ * bit at the same time.
+ */
+#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+#define ptep_set_access_flags(vma, address, ptep, entry, dirty)	\
+({									\
+	int __changed = !pte_same(*(ptep), entry);			\
+	if (__changed && dirty) {					\
+		*ptep = entry;						\
+		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
+		flush_tlb_page(vma, address);				\
+	}								\
+	__changed;							\
+})
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
+	int __ret = 0;							\
+	if (pte_young(*(ptep)))						\
+		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
+					   &(ptep)->pte);		\
+	if (__ret)							\
+		pte_update((vma)->vm_mm, addr, ptep);			\
+	__ret;								\
+})
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(vma, address, ptep)			\
+({									\
+	int __young;							\
+	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
+	if (__young)							\
+		flush_tlb_page(vma, address);				\
+	__young;							\
+})
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	pte_t pte = native_ptep_get_and_clear(ptep);
+	pte_update(mm, addr, ptep);
+	return pte;
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+{
+	pte_t pte;
+	if (full) {
+		/*
+		 * Full address destruction in progress; paravirt does not
+		 * care about updates and native needs no locking
+		 */
+		pte = native_local_ptep_get_and_clear(ptep);
+	} else {
+		pte = ptep_get_and_clear(mm, addr, ptep);
+	}
+	return pte;
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+{
+	clear_bit(_PAGE_BIT_RW, &ptep->pte);
+	pte_update(mm, addr, ptep);
+}
+
+#ifndef CONFIG_PARAVIRT
+#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
+#endif	/* !CONFIG_PARAVIRT */
+
+#include <asm-generic/pgtable.h>
+#endif	/* __ASSEMBLY__ */
+
 #endif	/* _ASM_X86_PGTABLE_H */
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 109dad5e16ee..d4d238c10293 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -107,105 +107,6 @@ extern unsigned long pg0[];
 # include <asm/pgtable-2level.h>
 #endif
 
-#ifndef CONFIG_PARAVIRT
-/*
- * Rules for using pte_update - it must be called after any PTE update which
- * has not been done using the set_pte / clear_pte interfaces.  It is used by
- * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
- * updates should either be sets, clears, or set_pte_atomic for P->P
- * transitions, which means this hook should only be called for user PTEs.
- * This hook implies a P->P protection or access change has taken place, which
- * requires a subsequent TLB flush.  The notification can optionally be delayed
- * until the TLB flush event by using the pte_update_defer form of the
- * interface, but care must be taken to assure that the flush happens while
- * still holding the same page table lock so that the shadow and primary pages
- * do not become out of sync on SMP.
- */
-#define pte_update(mm, addr, ptep)		do { } while (0)
-#define pte_update_defer(mm, addr, ptep)	do { } while (0)
-#endif
-
-/* local pte updates need not use xchg for locking */
-static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
-{
-	pte_t res = *ptep;
-
-	/* Pure native function needs no input for mm, addr */
-	native_pte_clear(NULL, 0, ptep);
-	return res;
-}
-
-/*
- * We only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPU's that might be updating the dirty
- * bit at the same time.
- */
-#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(vma, address, ptep, entry, dirty)	\
-({									\
-	int __changed = !pte_same(*(ptep), entry);			\
-	if (__changed && dirty) {					\
-		(ptep)->pte_low = (entry).pte_low;			\
-		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
-		flush_tlb_page(vma, address);				\
-	}								\
-	__changed;							\
-})
-
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
-	int __ret = 0;							\
-	if (pte_young(*(ptep)))						\
-		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
-					   &(ptep)->pte_low);		\
-	if (__ret)							\
-		pte_update((vma)->vm_mm, addr, ptep);			\
-	__ret;								\
-})
-
-#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(vma, address, ptep)			\
-({									\
-	int __young;							\
-	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
-	if (__young)							\
-		flush_tlb_page(vma, address);				\
-	__young;							\
-})
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = native_ptep_get_and_clear(ptep);
-	pte_update(mm, addr, ptep);
-	return pte;
-}
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
-{
-	pte_t pte;
-	if (full) {
-		/*
-		 * Full address destruction in progress; paravirt does not
-		 * care about updates and native needs no locking
-		 */
-		pte = native_local_ptep_get_and_clear(ptep);
-	} else {
-		pte = ptep_get_and_clear(mm, addr, ptep);
-	}
-	return pte;
-}
-
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
-	pte_update(mm, addr, ptep);
-}
-
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
  *
@@ -359,6 +260,4 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base)
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
 		remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#include <asm-generic/pgtable.h>
-
 #endif /* _I386_PGTABLE_H */
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 77038d8e9bfd..987f51f684a5 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -101,18 +101,18 @@ static inline void pgd_clear (pgd_t * pgd)
 	set_pgd(pgd, __pgd(0));
 }
 
-#define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte, 0))
+#define native_ptep_get_and_clear(xp)	__pte(xchg(&(xp)->pte, 0))
 
 struct mm_struct;
 
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+static inline pte_t native_ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
 {
 	pte_t pte;
 	if (full) {
 		pte = *ptep;
 		*ptep = __pte(0);
 	} else {
-		pte = ptep_get_and_clear(mm, addr, ptep);
+		pte = native_ptep_get_and_clear(ptep);
 	}
 	return pte;
 }
@@ -158,26 +158,12 @@ static inline unsigned long pmd_bad(pmd_t pmd)
 
 #define pte_none(x)	(!pte_val(x))
 #define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+#define native_pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))	/* FIXME: is this right? */
 #define pte_page(x)	pfn_to_page(pte_pfn(x))
 #define pte_pfn(x)	((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
-struct vm_area_struct;
-
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-	if (!pte_young(*ptep))
-		return 0;
-	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
-}
-
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	clear_bit(_PAGE_BIT_RW, &ptep->pte);
-}
-
 /*
  * Macro to mark a page protection value as "uncacheable".
  */
@@ -243,22 +229,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 
 #define update_mmu_cache(vma,address,pte) do { } while (0)
 
-/* We only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPU's that might be updating the dirty
- * bit at the same time. */
-#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({									  \
-	int __changed = !pte_same(*(__ptep), __entry);			  \
-	if (__changed && __dirty) {					  \
-		set_pte(__ptep, __entry);				  \
-		flush_tlb_page(__vma, __address);			  \
-	}								  \
-	__changed;							  \
-})
-
 /* Encode and de-code a swap entry */
 #define __swp_type(x)			(((x).val >> 1) & 0x3f)
 #define __swp_offset(x)			((x).val >> 8)
@@ -290,12 +260,7 @@ pte_t *lookup_address(unsigned long addr);
 #define	kc_offset_to_vaddr(o) \
    (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
 
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
-#include <asm-generic/pgtable.h>
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _X86_64_PGTABLE_H */