author     Jeremy Fitzhardinge <jeremy@goop.org>                 2009-02-08 21:46:18 -0500
committer  Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>  2009-02-11 17:54:09 -0500
commit     8d19c99faf6165ef095138dd595d46b9bbb34055
tree       91e1ff606c3649f1e65a998abb87999d7fadfcdd /arch
parent     b924a28138572f03bc8647c2be8f876d27e2666a
Split pgtable.h into pgtable_types.h and pgtable.h
Signed-off-by: Jeremy Fitzhardinge <jeremy@goop.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/pgtable.h        212
-rw-r--r--  arch/x86/include/asm/pgtable_types.h  220
2 files changed, 221 insertions(+), 211 deletions(-)
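With this split, the PTE type, flag and pgprot definitions live in pgtable_types.h, and pgtable.h pulls them in with a single #include. As a rough sketch of the intended usage (not part of the patch, and assuming pteval_t is already in scope via asm/page.h), code that only needs the flag and protection macros can include the lighter header directly:

    #include <asm/pgtable_types.h>

    /* Compose an uncached kernel protection value from the relocated macros. */
    static const pteval_t uc_kernel_prot = __PAGE_KERNEL | _PAGE_CACHE_UC;
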
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 860f1b635c40..10404e7bf32d 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -3,164 +3,7 @@
 
 #include <asm/page.h>
 
-#define FIRST_USER_ADDRESS 0
-
-#define _PAGE_BIT_PRESENT 0 /* is present */
-#define _PAGE_BIT_RW 1 /* writeable */
-#define _PAGE_BIT_USER 2 /* userspace addressable */
-#define _PAGE_BIT_PWT 3 /* page write through */
-#define _PAGE_BIT_PCD 4 /* page cache disabled */
-#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
-#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
-#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
-#define _PAGE_BIT_PAT 7 /* on 4KB pages */
-#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
-#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
-#define _PAGE_BIT_UNUSED3 11
-#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
-#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
-
-/* If _PAGE_BIT_PRESENT is clear, we use these: */
-/* - if the user mapped it with PROT_NONE; pte_present gives true */
-#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
-/* - set: nonlinear file mapping, saved PTE; unset:swap */
-#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY
-
-#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
-#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
-#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
-#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
-#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
-#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
-#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
-#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
-#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
-#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
-#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
-#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
-#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
-#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
-#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
-#define __HAVE_ARCH_PTE_SPECIAL
-
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
-#else
-#define _PAGE_NX (_AT(pteval_t, 0))
-#endif
-
-#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
-#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
-
-#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
-        _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
-        _PAGE_DIRTY)
-
-/* Set of bits not changed in pte_modify */
-#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
-        _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
-#define _PAGE_CACHE_WB (0)
-#define _PAGE_CACHE_WC (_PAGE_PWT)
-#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD)
-#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT)
-
-#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
-        _PAGE_ACCESSED | _PAGE_NX)
-
-#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \
-        _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-        _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-        _PAGE_ACCESSED)
-#define PAGE_COPY PAGE_COPY_NOEXEC
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-        _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
-        _PAGE_ACCESSED)
-
-#define __PAGE_KERNEL_EXEC \
-        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
-#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
-
-#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
-#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
-#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
-#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
-#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
-
-#define __PAGE_KERNEL_IO (__PAGE_KERNEL | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_UC_MINUS (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
-#define __PAGE_KERNEL_IO_WC (__PAGE_KERNEL_WC | _PAGE_IOMAP)
-
-#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
-#define PAGE_KERNEL_WC __pgprot(__PAGE_KERNEL_WC)
-#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_UC_MINUS __pgprot(__PAGE_KERNEL_UC_MINUS)
-#define PAGE_KERNEL_EXEC_NOCACHE __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
-#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
-#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
-#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
-
-#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
-#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
-#define PAGE_KERNEL_IO_UC_MINUS __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
-#define PAGE_KERNEL_IO_WC __pgprot(__PAGE_KERNEL_IO_WC)
-
-/* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY_EXEC
-#define __P101 PAGE_READONLY_EXEC
-#define __P110 PAGE_COPY_EXEC
-#define __P111 PAGE_COPY_EXEC
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY_EXEC
-#define __S101 PAGE_READONLY_EXEC
-#define __S110 PAGE_SHARED_EXEC
-#define __S111 PAGE_SHARED_EXEC
-
-/*
- * early identity mapping pte attrib macros.
- */
-#ifdef CONFIG_X86_64
-#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
-#else
-/*
- * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
- * bits are combined, this will alow user to access the high address mapped
- * VDSO in the presence of CONFIG_COMPAT_VDSO
- */
-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
-#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
-#endif
+#include <asm/pgtable_types.h>
 
 /*
  * Macro to mark a page protection value as UC-
@@ -172,9 +15,6 @@
 
 #ifndef __ASSEMBLY__
 
-#define pgprot_writecombine pgprot_writecombine
-extern pgprot_t pgprot_writecombine(pgprot_t prot);
-
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
@@ -316,8 +156,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
 	return pte_set_flags(pte, _PAGE_SPECIAL);
 }
 
-extern pteval_t __supported_pte_mask;
-
 static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 {
 	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
@@ -376,32 +214,6 @@ static inline int is_new_memtype_allowed(unsigned long flags,
 	return 1;
 }
 
-#ifndef __ASSEMBLY__
-/* Indicate that x86 has its own track and untrack pfn vma functions */
-#define __HAVE_PFNMAP_TRACKING
-
-#define __HAVE_PHYS_MEM_ACCESS_PROT
-struct file;
-pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
-                              unsigned long size, pgprot_t vma_prot);
-int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
-                              unsigned long size, pgprot_t *vma_prot);
-#endif
-
-/* Install a pte for a particular vaddr in kernel space. */
-void set_pte_vaddr(unsigned long vaddr, pte_t pte);
-
-#ifdef CONFIG_X86_32
-extern void native_pagetable_setup_start(pgd_t *base);
-extern void native_pagetable_setup_done(pgd_t *base);
-#else
-static inline void native_pagetable_setup_start(pgd_t *base) {}
-static inline void native_pagetable_setup_done(pgd_t *base) {}
-#endif
-
-struct seq_file;
-extern void arch_report_meminfo(struct seq_file *m);
-
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else /* !CONFIG_PARAVIRT */
@@ -662,28 +474,6 @@ static inline int pgd_none(pgd_t pgd)
 
 #ifndef __ASSEMBLY__
 
-enum {
-	PG_LEVEL_NONE,
-	PG_LEVEL_4K,
-	PG_LEVEL_2M,
-	PG_LEVEL_1G,
-	PG_LEVEL_NUM
-};
-
-#ifdef CONFIG_PROC_FS
-extern void update_page_count(int level, unsigned long pages);
-#else
-static inline void update_page_count(int level, unsigned long pages) { }
-#endif
-
-/*
- * Helper function that returns the kernel pagetable entry controlling
- * the virtual address 'address'. NULL means no pagetable entry present.
- * NOTE: the return type is pte_t but if the pmd is PSE then we return it
- * as a pte too.
- */
-extern pte_t *lookup_address(unsigned long address, unsigned int *level);
-
 /* local pte updates need not use xchg for locking */
 static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
 {
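The function side of the interface (pte_set_flags(), pfn_pte(), the paravirt hooks, and so on) stays behind in pgtable.h. Purely as an illustrative fragment (not taken from the patch; vaddr and pfn are hypothetical), a caller combining the retained constructor with the relocated set_pte_vaddr() declaration would look like:

    /*
     * Illustrative only: install a kernel mapping for a hypothetical
     * vaddr/pfn pair.  pfn_pte() remains in pgtable.h, while the
     * set_pte_vaddr() declaration moves into pgtable_types.h.
     */
    static void example_map_kernel_page(unsigned long vaddr, unsigned long pfn)
    {
            set_pte_vaddr(vaddr, pfn_pte(pfn, PAGE_KERNEL));
    }
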
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
new file mode 100644
index 000000000000..f80f5a66bb85
--- /dev/null
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -0,0 +1,220 @@
+#ifndef _ASM_X86_PGTABLE_DEFS_H
+#define _ASM_X86_PGTABLE_DEFS_H
+
+#include <linux/const.h>
+
+#define FIRST_USER_ADDRESS 0
+
+#define _PAGE_BIT_PRESENT 0 /* is present */
+#define _PAGE_BIT_RW 1 /* writeable */
+#define _PAGE_BIT_USER 2 /* userspace addressable */
+#define _PAGE_BIT_PWT 3 /* page write through */
+#define _PAGE_BIT_PCD 4 /* page cache disabled */
+#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
+#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
+#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
+#define _PAGE_BIT_PAT 7 /* on 4KB pages */
+#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
+#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
+#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
+#define _PAGE_BIT_UNUSED3 11
+#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
+#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
+#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
+
+/* If _PAGE_BIT_PRESENT is clear, we use these: */
+/* - if the user mapped it with PROT_NONE; pte_present gives true */
+#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL
+/* - set: nonlinear file mapping, saved PTE; unset:swap */
+#define _PAGE_BIT_FILE _PAGE_BIT_DIRTY
+
+#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
+#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
+#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
+#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
+#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
+#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
+#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
+#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
+#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
+#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
+#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
+#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
+#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
+#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
+#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
+#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
+#define __HAVE_ARCH_PTE_SPECIAL
+
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
+#else
+#define _PAGE_NX (_AT(pteval_t, 0))
+#endif
+
+#define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
+#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
+
+#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+        _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | \
+        _PAGE_DIRTY)
+
+/* Set of bits not changed in pte_modify */
+#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
+        _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define _PAGE_CACHE_MASK (_PAGE_PCD | _PAGE_PWT)
+#define _PAGE_CACHE_WB (0)
+#define _PAGE_CACHE_WC (_PAGE_PWT)
+#define _PAGE_CACHE_UC_MINUS (_PAGE_PCD)
+#define _PAGE_CACHE_UC (_PAGE_PCD | _PAGE_PWT)
+
+#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
+        _PAGE_ACCESSED | _PAGE_NX)
+
+#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | \
+        _PAGE_USER | _PAGE_ACCESSED)
+#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+        _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+        _PAGE_ACCESSED)
+#define PAGE_COPY PAGE_COPY_NOEXEC
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+        _PAGE_ACCESSED | _PAGE_NX)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
+        _PAGE_ACCESSED)
+
+#define __PAGE_KERNEL_EXEC \
+        (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
+#define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
+
+#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
+#define __PAGE_KERNEL_RX (__PAGE_KERNEL_EXEC & ~_PAGE_RW)
+#define __PAGE_KERNEL_EXEC_NOCACHE (__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
+#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
+#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
+#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
+
+#define __PAGE_KERNEL_IO (__PAGE_KERNEL | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO_NOCACHE (__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO_UC_MINUS (__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
+#define __PAGE_KERNEL_IO_WC (__PAGE_KERNEL_WC | _PAGE_IOMAP)
+
+#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
+#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
+#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
+#define PAGE_KERNEL_RX __pgprot(__PAGE_KERNEL_RX)
+#define PAGE_KERNEL_WC __pgprot(__PAGE_KERNEL_WC)
+#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
+#define PAGE_KERNEL_UC_MINUS __pgprot(__PAGE_KERNEL_UC_MINUS)
+#define PAGE_KERNEL_EXEC_NOCACHE __pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
+#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
+#define PAGE_KERNEL_LARGE_NOCACHE __pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
+#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
+#define PAGE_KERNEL_VSYSCALL __pgprot(__PAGE_KERNEL_VSYSCALL)
+#define PAGE_KERNEL_VSYSCALL_NOCACHE __pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
+
+#define PAGE_KERNEL_IO __pgprot(__PAGE_KERNEL_IO)
+#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
+#define PAGE_KERNEL_IO_UC_MINUS __pgprot(__PAGE_KERNEL_IO_UC_MINUS)
+#define PAGE_KERNEL_IO_WC __pgprot(__PAGE_KERNEL_IO_WC)
+
+/* xwr */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_READONLY_EXEC
+#define __P101 PAGE_READONLY_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
+
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_READONLY_EXEC
+#define __S101 PAGE_READONLY_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
+
+/*
+ * early identity mapping pte attrib macros.
+ */
+#ifdef CONFIG_X86_64
+#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
+#else
+/*
+ * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
+ * bits are combined, this will alow user to access the high address mapped
+ * VDSO in the presence of CONFIG_COMPAT_VDSO
+ */
+#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
+#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
+#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
+#endif
+
+#ifndef __ASSEMBLY__
+
+extern pteval_t __supported_pte_mask;
+
+#define pgprot_writecombine pgprot_writecombine
+extern pgprot_t pgprot_writecombine(pgprot_t prot);
+
+/* Indicate that x86 has its own track and untrack pfn vma functions */
+#define __HAVE_PFNMAP_TRACKING
+
+#define __HAVE_PHYS_MEM_ACCESS_PROT
+struct file;
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                              unsigned long size, pgprot_t vma_prot);
+int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
+                              unsigned long size, pgprot_t *vma_prot);
+
+/* Install a pte for a particular vaddr in kernel space. */
+void set_pte_vaddr(unsigned long vaddr, pte_t pte);
+
+#ifdef CONFIG_X86_32
+extern void native_pagetable_setup_start(pgd_t *base);
+extern void native_pagetable_setup_done(pgd_t *base);
+#else
+static inline void native_pagetable_setup_start(pgd_t *base) {}
+static inline void native_pagetable_setup_done(pgd_t *base) {}
+#endif
+
+struct seq_file;
+extern void arch_report_meminfo(struct seq_file *m);
+
+enum {
+	PG_LEVEL_NONE,
+	PG_LEVEL_4K,
+	PG_LEVEL_2M,
+	PG_LEVEL_1G,
+	PG_LEVEL_NUM
+};
+
+#ifdef CONFIG_PROC_FS
+extern void update_page_count(int level, unsigned long pages);
+#else
+static inline void update_page_count(int level, unsigned long pages) { }
+#endif
+
+/*
+ * Helper function that returns the kernel pagetable entry controlling
+ * the virtual address 'address'. NULL means no pagetable entry present.
+ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
+ * as a pte too.
+ */
+extern pte_t *lookup_address(unsigned long address, unsigned int *level);
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_X86_PGTABLE_DEFS_H */
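
Finally, for context, a minimal sketch (not part of the patch) of how the lookup_address() helper declared above is typically used; addr is a hypothetical kernel virtual address and pte_val() is assumed to come from asm/page.h:

    unsigned int level;
    pte_t *ptep = lookup_address(addr, &level);

    if (ptep && (pte_val(*ptep) & _PAGE_PRESENT))
            /* level is one of PG_LEVEL_4K, PG_LEVEL_2M or PG_LEVEL_1G */
            printk(KERN_DEBUG "0x%lx mapped at level %u\n", addr, level);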