| author | Joe Perches <joe@perches.com> | 2008-03-23 04:03:12 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-04-17 11:41:26 -0400 |
| commit | 3cbaeafeb10e38bce6c8d4764a254260d5a564bd | |
| tree | 5b991c427aad27f6b7f46c7d6a07512907b74be2 /include/asm-x86/pgtable.h | |
| parent | 7f94401e439dc1137319c48dfec0285f681eb3ad | |
include/asm-x86/pgtable.h: checkpatch cleanups - formatting only
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/asm-x86/pgtable.h')

-rw-r--r--  include/asm-x86/pgtable.h  156

1 file changed, 120 insertions, 36 deletions
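The 156-line delta below is purely mechanical: long object-like macros are wrapped with backslash continuations so they fit the 80-column limit, and the one-line `static inline` helpers are expanded into the usual multi-line kernel brace style. A minimal sketch of those two patterns follows, using hypothetical names (`EXAMPLE_FLAGS`, `example_flag_set`) rather than identifiers from this patch:

```c
/*
 * Illustration only; these identifiers do not appear in the patch.
 * Pattern 1: a long object-like macro gains a backslash continuation.
 */
#define EXAMPLE_FLAGS	(0x01 | 0x02 |	\
			 0x04 | 0x08)

/* Pattern 2: a former one-line helper in the multi-line brace style. */
static inline int example_flag_set(unsigned long val)
{
	return (val & EXAMPLE_FLAGS) != 0;
}
```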
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index e814cfe96af2..2ce765070464 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -48,12 +48,15 @@
 #endif
 
 /* If _PAGE_PRESENT is clear, we use these: */
-#define _PAGE_FILE	_PAGE_DIRTY /* nonlinear file mapping, saved PTE; unset:swap */
+#define _PAGE_FILE	_PAGE_DIRTY /* nonlinear file mapping,
+					 * saved PTE; unset:swap */
 #define _PAGE_PROTNONE	_PAGE_PSE /* if the user mapped it with PROT_NONE;
 				   pte_present gives true */
 
-#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
+			 _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
+			 _PAGE_DIRTY)
 
 #define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 
@@ -64,14 +67,20 @@
 #define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
 
 #define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
+			 _PAGE_ACCESSED | _PAGE_NX)
 
-#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
+				 _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
+				 _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
+				 _PAGE_ACCESSED)
 #define PAGE_COPY		PAGE_COPY_NOEXEC
-#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
+				 _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
+				 _PAGE_ACCESSED)
 
 #ifdef CONFIG_X86_32
 #define _PAGE_KERNEL_EXEC \
@@ -142,7 +151,7 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
-extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 
 extern spinlock_t pgd_lock;
@@ -152,30 +161,101 @@ extern struct list_head pgd_list;
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
-static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
-static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_huge(pte_t pte)		{ return pte_val(pte) & _PAGE_PSE; }
-static inline int pte_global(pte_t pte)	{ return pte_val(pte) & _PAGE_GLOBAL; }
-static inline int pte_exec(pte_t pte)		{ return !(pte_val(pte) & _PAGE_NX); }
-
-static inline int pmd_large(pmd_t pte) {
-	return (pmd_val(pte) & (_PAGE_PSE|_PAGE_PRESENT)) ==
-		(_PAGE_PSE|_PAGE_PRESENT);
+static inline int pte_dirty(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_DIRTY;
+}
+
+static inline int pte_young(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+static inline int pte_write(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_RW;
+}
+
+static inline int pte_file(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_FILE;
+}
+
+static inline int pte_huge(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_PSE;
 }
 
-static inline pte_t pte_mkclean(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY); }
-static inline pte_t pte_mkold(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED); }
-static inline pte_t pte_wrprotect(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW); }
-static inline pte_t pte_mkexec(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX); }
-static inline pte_t pte_mkdirty(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_DIRTY); }
-static inline pte_t pte_mkyoung(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_ACCESSED); }
-static inline pte_t pte_mkwrite(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_RW); }
-static inline pte_t pte_mkhuge(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_PSE); }
-static inline pte_t pte_clrhuge(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE); }
-static inline pte_t pte_mkglobal(pte_t pte)	{ return __pte(pte_val(pte) | _PAGE_GLOBAL); }
-static inline pte_t pte_clrglobal(pte_t pte)	{ return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL); }
+static inline int pte_global(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_GLOBAL;
+}
+
+static inline int pte_exec(pte_t pte)
+{
+	return !(pte_val(pte) & _PAGE_NX);
+}
+
+static inline int pmd_large(pmd_t pte)
+{
+	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
+		(_PAGE_PSE | _PAGE_PRESENT);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED);
+}
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW);
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX);
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_RW);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_PSE);
+}
+
+static inline pte_t pte_clrhuge(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE);
+}
+
+static inline pte_t pte_mkglobal(pte_t pte)
+{
+	return __pte(pte_val(pte) | _PAGE_GLOBAL);
+}
+
+static inline pte_t pte_clrglobal(pte_t pte)
+{
+	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
+}
 
 extern pteval_t __supported_pte_mask;
 
@@ -342,7 +422,8 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
 })
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+				       pte_t *ptep)
 {
 	pte_t pte = native_ptep_get_and_clear(ptep);
 	pte_update(mm, addr, ptep);
@@ -350,7 +431,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 }
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long addr, pte_t *ptep,
+					    int full)
 {
 	pte_t pte;
 	if (full) {
@@ -366,7 +449,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 }
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+				      unsigned long addr, pte_t *ptep)
 {
 	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
 	pte_update(mm, addr, ptep);