author		Jeremy Fitzhardinge <jeremy@goop.org>	2008-01-30 07:32:58 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:32:58 -0500
commit		195466dc4b9b8a4cc89d37ea1211746f3afbc941 (patch)
tree		38a4dc9e105d54cf285cdcbc141b424a2fc16f41 /include/asm-x86/pgtable_32.h
parent		e33287013585e96180c575288bf1db22bee47b52 (diff)
x86: pgtable: unify pte accessors
Make various pte accessors common.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
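The destination of the unification is not visible in this excerpt (the diffstat below is limited to pgtable_32.h), but the accessors removed here are re-added essentially verbatim to the header shared by 32-bit and 64-bit, presumably include/asm-x86/pgtable.h. As a sketch under that assumption, the unified form of one accessor would read:

	/* Sketch only -- destination header assumed, not shown in this diffstat. */
	#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
	static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
					       pte_t *ptep)
	{
		pte_t pte = native_ptep_get_and_clear(ptep);
		pte_update(mm, addr, ptep);	/* hook for shadow-pagetable hypervisors */
		return pte;
	}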
Diffstat (limited to 'include/asm-x86/pgtable_32.h')
-rw-r--r--	include/asm-x86/pgtable_32.h	101
1 file changed, 0 insertions(+), 101 deletions(-)
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 109dad5e16ee..d4d238c10293 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -107,105 +107,6 @@ extern unsigned long pg0[];
 # include <asm/pgtable-2level.h>
 #endif
 
-#ifndef CONFIG_PARAVIRT
-/*
- * Rules for using pte_update - it must be called after any PTE update which
- * has not been done using the set_pte / clear_pte interfaces.  It is used by
- * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
- * updates should either be sets, clears, or set_pte_atomic for P->P
- * transitions, which means this hook should only be called for user PTEs.
- * This hook implies a P->P protection or access change has taken place, which
- * requires a subsequent TLB flush.  The notification can optionally be delayed
- * until the TLB flush event by using the pte_update_defer form of the
- * interface, but care must be taken to assure that the flush happens while
- * still holding the same page table lock so that the shadow and primary pages
- * do not become out of sync on SMP.
- */
-#define pte_update(mm, addr, ptep)		do { } while (0)
-#define pte_update_defer(mm, addr, ptep)	do { } while (0)
-#endif
-
-/* local pte updates need not use xchg for locking */
-static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
-{
-	pte_t res = *ptep;
-
-	/* Pure native function needs no input for mm, addr */
-	native_pte_clear(NULL, 0, ptep);
-	return res;
-}
-
-/*
- * We only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPU's that might be updating the dirty
- * bit at the same time.
- */
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(vma, address, ptep, entry, dirty)	\
-({									\
-	int __changed = !pte_same(*(ptep), entry);			\
-	if (__changed && dirty) {					\
-		(ptep)->pte_low = (entry).pte_low;			\
-		pte_update_defer((vma)->vm_mm, (address), (ptep));	\
-		flush_tlb_page(vma, address);				\
-	}								\
-	__changed;							\
-})
-
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define ptep_test_and_clear_young(vma, addr, ptep) ({			\
-	int __ret = 0;							\
-	if (pte_young(*(ptep)))						\
-		__ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,		\
-						&(ptep)->pte_low);	\
-	if (__ret)							\
-		pte_update((vma)->vm_mm, addr, ptep);			\
-	__ret;								\
-})
-
-#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
-#define ptep_clear_flush_young(vma, address, ptep)			\
-({									\
-	int __young;							\
-	__young = ptep_test_and_clear_young((vma), (address), (ptep));	\
-	if (__young)							\
-		flush_tlb_page(vma, address);				\
-	__young;							\
-})
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = native_ptep_get_and_clear(ptep);
-	pte_update(mm, addr, ptep);
-	return pte;
-}
-
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
-{
-	pte_t pte;
-	if (full) {
-		/*
-		 * Full address destruction in progress; paravirt does not
-		 * care about updates and native needs no locking
-		 */
-		pte = native_local_ptep_get_and_clear(ptep);
-	} else {
-		pte = ptep_get_and_clear(mm, addr, ptep);
-	}
-	return pte;
-}
-
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	clear_bit(_PAGE_BIT_RW, &ptep->pte_low);
-	pte_update(mm, addr, ptep);
-}
-
 /*
  * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
  *
@@ -359,6 +260,4 @@ static inline void paravirt_pagetable_setup_done(pgd_t *base)
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#include <asm-generic/pgtable.h>
-
 #endif /* _I386_PGTABLE_H */
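
To make the pte_update/pte_update_defer contract from the removed comment concrete, here is a minimal sketch of a caller. example_make_pte_readonly is a hypothetical helper, not part of this patch; it simply follows the pattern of the removed ptep_set_access_flags and ptep_set_wrprotect: make a present-to-present change, issue the (deferred) notification, then flush the TLB while still holding the same page table lock.

	/*
	 * Hypothetical caller (not in the patch) illustrating the deferred-update
	 * rule: the notification and the TLB flush must both happen under the
	 * same page table lock, so shadow and primary page tables cannot drift
	 * apart on SMP.
	 */
	static inline void example_make_pte_readonly(struct vm_area_struct *vma,
						     unsigned long addr, pte_t *ptep)
	{
		/* Caller holds the page table lock covering *ptep. */
		clear_bit(_PAGE_BIT_RW, &ptep->pte_low);	/* P->P protection change */
		pte_update_defer(vma->vm_mm, addr, ptep);	/* delayed hypervisor notify */
		flush_tlb_page(vma, addr);			/* flush before dropping the lock */
	}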