Diffstat (limited to 'include/asm-x86/pgtable.h')
-rw-r--r--  include/asm-x86/pgtable.h | 183
1 file changed, 114 insertions(+), 69 deletions(-)
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 801b31f71452..04caa2f544df 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -18,32 +18,30 @@
 #define _PAGE_BIT_UNUSED2	10
 #define _PAGE_BIT_UNUSED3	11
 #define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
+#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
 #define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */
 
-/*
- * Note: we use _AC(1, L) instead of _AC(1, UL) so that we get a
- * sign-extended value on 32-bit with all 1's in the upper word,
- * which preserves the upper pte values on 64-bit ptes:
- */
-#define _PAGE_PRESENT	(_AC(1, L)<<_PAGE_BIT_PRESENT)
-#define _PAGE_RW	(_AC(1, L)<<_PAGE_BIT_RW)
-#define _PAGE_USER	(_AC(1, L)<<_PAGE_BIT_USER)
-#define _PAGE_PWT	(_AC(1, L)<<_PAGE_BIT_PWT)
-#define _PAGE_PCD	(_AC(1, L)<<_PAGE_BIT_PCD)
-#define _PAGE_ACCESSED	(_AC(1, L)<<_PAGE_BIT_ACCESSED)
-#define _PAGE_DIRTY	(_AC(1, L)<<_PAGE_BIT_DIRTY)
-#define _PAGE_PSE	(_AC(1, L)<<_PAGE_BIT_PSE)	/* 2MB page */
-#define _PAGE_GLOBAL	(_AC(1, L)<<_PAGE_BIT_GLOBAL)	/* Global TLB entry */
-#define _PAGE_UNUSED1	(_AC(1, L)<<_PAGE_BIT_UNUSED1)
-#define _PAGE_UNUSED2	(_AC(1, L)<<_PAGE_BIT_UNUSED2)
-#define _PAGE_UNUSED3	(_AC(1, L)<<_PAGE_BIT_UNUSED3)
-#define _PAGE_PAT	(_AC(1, L)<<_PAGE_BIT_PAT)
-#define _PAGE_PAT_LARGE	(_AC(1, L)<<_PAGE_BIT_PAT_LARGE)
+#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
+#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
+#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
+#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
+#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
+#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
+#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+#define _PAGE_UNUSED2	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED2)
+#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
+#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
+#define __HAVE_ARCH_PTE_SPECIAL
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-#define _PAGE_NX	(_AC(1, ULL) << _PAGE_BIT_NX)
+#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
 #else
-#define _PAGE_NX	0
+#define _PAGE_NX	(_AT(pteval_t, 0))
 #endif
 
 /* If _PAGE_PRESENT is clear, we use these: */
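Note (editorial, not part of the patch): the switch from _AC(1, L) to _AT(pteval_t, 1) makes every flag an expression of the pte's own integer type, so the NX bit (bit 63) no longer relies on sign-extension tricks to survive on 64-bit ptes. A minimal stand-alone sketch of the idea, using a userspace stand-in for pteval_t and the _AT() definition as found in include/linux/const.h:

/* Hedged sketch: pteval_t is a stand-in type; _AT() mirrors include/linux/const.h. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;
#define _AT(T, X)	((T)(X))

#define _PAGE_BIT_NX	63
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)	/* shift happens in 64 bits */

int main(void)
{
	printf("_PAGE_NX = %#llx\n", (unsigned long long)_PAGE_NX);
	return 0;
}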
@@ -57,7 +55,9 @@
 #define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
			 _PAGE_DIRTY)
 
-#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
+			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
 
 #define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
 #define _PAGE_CACHE_WB		(0)
@@ -81,19 +81,9 @@
 #define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
					 _PAGE_ACCESSED)
 
-#ifdef CONFIG_X86_32
-#define _PAGE_KERNEL_EXEC \
-	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define _PAGE_KERNEL	(_PAGE_KERNEL_EXEC | _PAGE_NX)
-
-#ifndef __ASSEMBLY__
-extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
-#endif	/* __ASSEMBLY__ */
-#else
 #define __PAGE_KERNEL_EXEC \
-	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
 #define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)
-#endif
 
 #define __PAGE_KERNEL_RO	(__PAGE_KERNEL & ~_PAGE_RW)
 #define __PAGE_KERNEL_RX	(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
@@ -104,26 +94,22 @@ extern pteval_t __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
 #define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
 #define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
 #define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
 #define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
 
-#ifdef CONFIG_X86_32
-# define MAKE_GLOBAL(x)			__pgprot((x))
-#else
-# define MAKE_GLOBAL(x)			__pgprot((x) | _PAGE_GLOBAL)
-#endif
-
-#define PAGE_KERNEL			MAKE_GLOBAL(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO			MAKE_GLOBAL(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RX			MAKE_GLOBAL(__PAGE_KERNEL_RX)
-#define PAGE_KERNEL_WC			MAKE_GLOBAL(__PAGE_KERNEL_WC)
-#define PAGE_KERNEL_NOCACHE		MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_UC_MINUS		MAKE_GLOBAL(__PAGE_KERNEL_UC_MINUS)
-#define PAGE_KERNEL_EXEC_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_EXEC_NOCACHE)
-#define PAGE_KERNEL_LARGE		MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_LARGE_EXEC		MAKE_GLOBAL(__PAGE_KERNEL_LARGE_EXEC)
-#define PAGE_KERNEL_VSYSCALL		MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_VSYSCALL_NOCACHE	MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
+#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
+#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
+#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
+#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
+#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
+#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
+#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
+#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
+#define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
 
 /* xwr */
 #define __P000	PAGE_NONE
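Note (editorial, not part of the patch): with MAKE_GLOBAL() gone, _PAGE_GLOBAL sits directly in __PAGE_KERNEL_EXEC for both 32- and 64-bit builds; on CPUs without PGE the bit is presumably dropped by masking with __supported_pte_mask (compare canon_pgprot() later in this header). A self-contained sketch of that masking, with bit positions matching the _PAGE_BIT_* values above:

/* Hedged model only; __supported_pte_mask handling is an assumption. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;
#define _PAGE_PRESENT	((pteval_t)1 << 0)
#define _PAGE_RW	((pteval_t)1 << 1)
#define _PAGE_ACCESSED	((pteval_t)1 << 5)
#define _PAGE_DIRTY	((pteval_t)1 << 6)
#define _PAGE_GLOBAL	((pteval_t)1 << 8)

#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)

int main(void)
{
	pteval_t supported_pte_mask = ~_PAGE_GLOBAL;	/* model: CPU without PGE */
	printf("with PGE:    %#llx\n", (unsigned long long)__PAGE_KERNEL_EXEC);
	printf("without PGE: %#llx\n",
	       (unsigned long long)(__PAGE_KERNEL_EXEC & supported_pte_mask));
	return 0;
}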
@@ -162,42 +148,42 @@ extern struct list_head pgd_list;
  */
 static inline int pte_dirty(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_DIRTY;
+	return pte_flags(pte) & _PAGE_DIRTY;
 }
 
 static inline int pte_young(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_ACCESSED;
+	return pte_flags(pte) & _PAGE_ACCESSED;
 }
 
 static inline int pte_write(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_RW;
+	return pte_flags(pte) & _PAGE_RW;
 }
 
 static inline int pte_file(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_FILE;
+	return pte_flags(pte) & _PAGE_FILE;
 }
 
 static inline int pte_huge(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_PSE;
+	return pte_flags(pte) & _PAGE_PSE;
 }
 
 static inline int pte_global(pte_t pte)
 {
-	return pte_val(pte) & _PAGE_GLOBAL;
+	return pte_flags(pte) & _PAGE_GLOBAL;
 }
 
 static inline int pte_exec(pte_t pte)
 {
-	return !(pte_val(pte) & _PAGE_NX);
+	return !(pte_flags(pte) & _PAGE_NX);
 }
 
 static inline int pte_special(pte_t pte)
 {
-	return 0;
+	return pte_val(pte) & _PAGE_SPECIAL;
 }
 
 static inline int pmd_large(pmd_t pte)
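Note (editorial, not part of the patch): the predicates above now use pte_flags() instead of pte_val(); pte_flags() is assumed to return the PTE value with the PFN field masked off, so flag tests never depend on the physical address stored in the entry. A stand-alone model of the distinction (the PFN mask value is illustrative, not the kernel's):

/* Hedged model; PTE_PFN_MASK here is an assumed stand-in. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;
#define PTE_PFN_MASK	((pteval_t)0x000ffffffffff000ULL)
#define _PAGE_DIRTY	((pteval_t)1 << 6)

static inline pteval_t pte_flags(pteval_t pte)
{
	return pte & ~PTE_PFN_MASK;	/* flags = everything outside the PFN field */
}

int main(void)
{
	pteval_t pte = 0x12345000ULL | _PAGE_DIRTY;
	printf("pte_dirty: %d\n", (int)((pte_flags(pte) & _PAGE_DIRTY) != 0));
	return 0;
}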
@@ -208,22 +194,22 @@ static inline int pmd_large(pmd_t pte)
 
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_DIRTY);
+	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_ACCESSED);
+	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
 }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_RW);
+	return __pte(pte_val(pte) & ~_PAGE_RW);
 }
 
 static inline pte_t pte_mkexec(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_NX);
+	return __pte(pte_val(pte) & ~_PAGE_NX);
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
@@ -248,7 +234,7 @@ static inline pte_t pte_mkhuge(pte_t pte)
 
 static inline pte_t pte_clrhuge(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_PSE);
+	return __pte(pte_val(pte) & ~_PAGE_PSE);
 }
 
 static inline pte_t pte_mkglobal(pte_t pte)
@@ -258,12 +244,12 @@ static inline pte_t pte_mkglobal(pte_t pte)
 
 static inline pte_t pte_clrglobal(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
+	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
 }
 
 static inline pte_t pte_mkspecial(pte_t pte)
 {
-	return pte;
+	return __pte(pte_val(pte) | _PAGE_SPECIAL);
 }
 
 extern pteval_t __supported_pte_mask;
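Note (editorial, not part of the patch): pte_mkspecial() now stores a real flag (_PAGE_SPECIAL, reusing the UNUSED1 bit position), and pte_special() earlier in this hunk series reports it instead of always returning 0; that is what __HAVE_ARCH_PTE_SPECIAL advertises to generic mm code. A minimal model of the round trip:

/* Hedged model; bit 9 corresponds to _PAGE_BIT_UNUSED1 in this header. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;
#define _PAGE_SPECIAL	((pteval_t)1 << 9)

static inline pteval_t pte_mkspecial(pteval_t pte) { return pte | _PAGE_SPECIAL; }
static inline int pte_special(pteval_t pte) { return (pte & _PAGE_SPECIAL) != 0; }

int main(void)
{
	pteval_t pte = 0x12345000ULL;
	printf("before: %d  after: %d\n", pte_special(pte),
	       pte_special(pte_mkspecial(pte)));
	return 0;
}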
@@ -288,13 +274,22 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 	 * Chop off the NX bit (if present), and add the NX portion of
 	 * the newprot (if present):
 	 */
-	val &= _PAGE_CHG_MASK & ~_PAGE_NX;
-	val |= pgprot_val(newprot) & __supported_pte_mask;
+	val &= _PAGE_CHG_MASK;
+	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;
 
 	return __pte(val);
 }
 
-#define pte_pgprot(x) __pgprot(pte_val(x) & (0xfff | _PAGE_NX))
+/* mprotect needs to preserve PAT bits when updating vm_page_prot */
+#define pgprot_modify pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
+	pgprotval_t addbits = pgprot_val(newprot);
+	return __pgprot(preservebits | addbits);
+}
+
+#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)
 
 #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
 
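Note (editorial, not part of the patch): pte_modify() now keeps exactly the bits in _PAGE_CHG_MASK (PFN, the PCD/PWT caching bits, SPECIAL, ACCESSED, DIRTY) and takes everything else from the new protections, and pgprot_modify() applies the same split so mprotect() preserves PAT-related bits in vm_page_prot. A self-contained sketch of that split with toy masks (the kernel version additionally filters through __supported_pte_mask):

/* Hedged model; masks are illustrative, not the kernel definitions. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;
#define _PAGE_RW	((pteval_t)1 << 1)
#define _PAGE_PWT	((pteval_t)1 << 3)
#define _PAGE_PCD	((pteval_t)1 << 4)
#define PTE_PFN_MASK	((pteval_t)0xfffff000ULL)
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT)

static pteval_t pte_modify(pteval_t pte, pteval_t newprot)
{
	pteval_t val = pte & _PAGE_CHG_MASK;	/* keep PFN and caching bits */
	val |= newprot & ~_PAGE_CHG_MASK;	/* take new permission bits   */
	return val;
}

int main(void)
{
	pteval_t pte = 0x12345000ULL | _PAGE_PCD | _PAGE_RW;	/* uncached, writable */
	pteval_t ro  = pte_modify(pte, 0);			/* newprot without RW */
	printf("old %#llx -> new %#llx (PCD kept, RW dropped)\n",
	       (unsigned long long)pte, (unsigned long long)ro);
	return 0;
}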
@@ -307,6 +302,17 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 				    unsigned long size, pgprot_t *vma_prot);
 #endif
 
+/* Install a pte for a particular vaddr in kernel space. */
+void set_pte_vaddr(unsigned long vaddr, pte_t pte);
+
+#ifdef CONFIG_X86_32
+extern void native_pagetable_setup_start(pgd_t *base);
+extern void native_pagetable_setup_done(pgd_t *base);
+#else
+static inline void native_pagetable_setup_start(pgd_t *base) {}
+static inline void native_pagetable_setup_done(pgd_t *base) {}
+#endif
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else  /* !CONFIG_PARAVIRT */
@@ -338,6 +344,16 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 
 #define pte_update(mm, addr, ptep)		do { } while (0)
 #define pte_update_defer(mm, addr, ptep)	do { } while (0)
+
+static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
+{
+	native_pagetable_setup_start(base);
+}
+
+static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
+{
+	native_pagetable_setup_done(base);
+}
 #endif	/* CONFIG_PARAVIRT */
 
 #endif	/* __ASSEMBLY__ */
@@ -348,6 +364,26 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 # include "pgtable_64.h"
 #endif
 
+/*
+ * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
+ *
+ * this macro returns the index of the entry in the pgd page which would
+ * control the given virtual address
+ */
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
+
+/*
+ * pgd_offset() returns a (pgd_t *)
+ * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
+ */
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
+/*
+ * a shortcut which implies the use of the kernel's pgd, instead
+ * of a process's
+ */
+#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
+
+
 #define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
 #define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
 
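Note (editorial, not part of the patch): pgd_index() simply extracts the top-level index bits of a virtual address. A worked stand-alone example, assuming the usual x86_64 4-level paging values (PGDIR_SHIFT == 39, PTRS_PER_PGD == 512) and an illustrative kernel-space address:

/* Hedged example; constants assume x86_64 with 4-level paging. */
#include <stdint.h>
#include <stdio.h>

#define PGDIR_SHIFT	39
#define PTRS_PER_PGD	512
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

int main(void)
{
	uint64_t vaddr = 0xffff880000000000ULL;		/* illustrative kernel address */
	printf("pgd_index(%#llx) = %llu\n",
	       (unsigned long long)vaddr,
	       (unsigned long long)pgd_index(vaddr));	/* bits 47..39 -> 272 */
	return 0;
}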
@@ -358,8 +394,15 @@ enum {
 	PG_LEVEL_4K,
 	PG_LEVEL_2M,
 	PG_LEVEL_1G,
+	PG_LEVEL_NUM
 };
 
+#ifdef CONFIG_PROC_FS
+extern void update_page_count(int level, unsigned long pages);
+#else
+static inline void update_page_count(int level, unsigned long pages) { }
+#endif
+
 /*
  * Helper function that returns the kernel pagetable entry controlling
  * the virtual address 'address'. NULL means no pagetable entry present.
@@ -409,6 +452,8 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
  * race with other CPU's that might be updating the dirty
  * bit at the same time.
  */
+struct vm_area_struct;
+
 #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 extern int ptep_set_access_flags(struct vm_area_struct *vma,
 				 unsigned long address, pte_t *ptep,