Diffstat (limited to 'include/asm-x86/pgtable_64.h')
 -rw-r--r--  include/asm-x86/pgtable_64.h  260
 1 file changed, 46 insertions(+), 214 deletions(-)
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 9b0ff477b39e..6e615a103c2f 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -17,22 +17,16 @@ extern pud_t level3_kernel_pgt[512];
 extern pud_t level3_ident_pgt[512];
 extern pmd_t level2_kernel_pgt[512];
 extern pgd_t init_level4_pgt[];
-extern unsigned long __supported_pte_mask;
 
 #define swapper_pg_dir init_level4_pgt
 
 extern void paging_init(void);
 extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
 
-/*
- * ZERO_PAGE is a global shared page that is always zero: used
- * for zero-mapped memory areas etc..
- */
-extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
-
 #endif /* !__ASSEMBLY__ */
 
+#define SHARED_KERNEL_PMD 1
+
 /*
  * PGDIR_SHIFT determines what a top-level page table entry can map
  */
@@ -71,57 +65,68 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 #define pgd_none(x)	(!pgd_val(x))
 #define pud_none(x)	(!pud_val(x))
 
-static inline void set_pte(pte_t *dst, pte_t val)
+struct mm_struct;
+
+static inline void native_pte_clear(struct mm_struct *mm, unsigned long addr,
+				    pte_t *ptep)
+{
+	*ptep = native_make_pte(0);
+}
+
+static inline void native_set_pte(pte_t *ptep, pte_t pte)
 {
-	pte_val(*dst) = pte_val(val);
+	*ptep = pte;
 }
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 
-static inline void set_pmd(pmd_t *dst, pmd_t val)
+static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
-	pmd_val(*dst) = pmd_val(val);
+	native_set_pte(ptep, pte);
 }
 
-static inline void set_pud(pud_t *dst, pud_t val)
+static inline pte_t native_ptep_get_and_clear(pte_t *xp)
 {
-	pud_val(*dst) = pud_val(val);
+#ifdef CONFIG_SMP
+	return native_make_pte(xchg(&xp->pte, 0));
+#else
+	/* native_local_ptep_get_and_clear, but duplicated because of cyclic dependency */
+	pte_t ret = *xp;
+	native_pte_clear(NULL, 0, xp);
+	return ret;
+#endif
 }
 
-static inline void pud_clear (pud_t *pud)
+static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-	set_pud(pud, __pud(0));
+	*pmdp = pmd;
 }
 
-static inline void set_pgd(pgd_t *dst, pgd_t val)
+static inline void native_pmd_clear(pmd_t *pmd)
 {
-	pgd_val(*dst) = pgd_val(val);
+	native_set_pmd(pmd, native_make_pmd(0));
 }
 
-static inline void pgd_clear (pgd_t * pgd)
+static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
-	set_pgd(pgd, __pgd(0));
+	*pudp = pud;
 }
 
-#define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte, 0))
+static inline void native_pud_clear(pud_t *pud)
+{
+	native_set_pud(pud, native_make_pud(0));
+}
 
-struct mm_struct;
+static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+	*pgdp = pgd;
+}
 
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+static inline void native_pgd_clear(pgd_t * pgd)
 {
-	pte_t pte;
-	if (full) {
-		pte = *ptep;
-		*ptep = __pte(0);
-	} else {
-		pte = ptep_get_and_clear(mm, addr, ptep);
-	}
-	return pte;
+	native_set_pgd(pgd, native_make_pgd(0));
 }
 
 #define pte_same(a, b)		((a).pte == (b).pte)
 
-#define pte_pgprot(a)	(__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
-
 #endif /* !__ASSEMBLY__ */
 
 #define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)
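
Note: the CONFIG_SMP branch of native_ptep_get_and_clear() above must read and
clear the PTE in a single atomic xchg; otherwise a hardware walker on another
CPU could set the accessed/dirty bits between the load and the store and that
update would be lost. The UP branch can use a plain load and store, which is
why the helper is duplicated (the cyclic-dependency comment). A minimal
userspace sketch of the same pattern, using C11 atomics in place of the
kernel's xchg; the pte_t here is a stand-in, not the kernel's type:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's pte_t: a single 64-bit word. */
    typedef struct { _Atomic uint64_t pte; } pte_t;

    /* Get-and-clear as one atomic exchange, mirroring
     * native_make_pte(xchg(&xp->pte, 0)) in the SMP branch above. */
    static uint64_t ptep_get_and_clear(pte_t *xp)
    {
        return atomic_exchange(&xp->pte, 0);
    }

    int main(void)
    {
        pte_t e = { 0x1000 | 0x067 };  /* pfn bits | PRESENT|RW|USER|ACCESSED|DIRTY */
        uint64_t old = ptep_get_and_clear(&e);
        printf("old=%#llx now=%#llx\n", (unsigned long long)old,
               (unsigned long long)atomic_load(&e.pte));
        return 0;
    }
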
@@ -131,8 +136,6 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 #define PGDIR_SIZE	(_AC(1,UL) << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
-#define USER_PTRS_PER_PGD	((TASK_SIZE-1)/PGDIR_SIZE+1)
-#define FIRST_USER_ADDRESS	0
 
 #define MAXMEM		 _AC(0x3fffffffffff, UL)
 #define VMALLOC_START    _AC(0xffffc20000000000, UL)
@@ -142,91 +145,6 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
 #define MODULES_END      _AC(0xfffffffffff00000, UL)
 #define MODULES_LEN   (MODULES_END - MODULES_VADDR)
 
-#define _PAGE_BIT_PRESENT	0
-#define _PAGE_BIT_RW		1
-#define _PAGE_BIT_USER		2
-#define _PAGE_BIT_PWT		3
-#define _PAGE_BIT_PCD		4
-#define _PAGE_BIT_ACCESSED	5
-#define _PAGE_BIT_DIRTY		6
-#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
-#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
-#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */
-
-#define _PAGE_PRESENT	0x001
-#define _PAGE_RW	0x002
-#define _PAGE_USER	0x004
-#define _PAGE_PWT	0x008
-#define _PAGE_PCD	0x010
-#define _PAGE_ACCESSED	0x020
-#define _PAGE_DIRTY	0x040
-#define _PAGE_PSE	0x080	/* 2MB page */
-#define _PAGE_FILE	0x040	/* nonlinear file mapping, saved PTE; unset:swap */
-#define _PAGE_GLOBAL	0x100	/* Global TLB entry */
-
-#define _PAGE_PROTNONE	0x080	/* If not present */
-#define _PAGE_NX	(_AC(1,UL)<<_PAGE_BIT_NX)
-
-#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
-#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_COPY PAGE_COPY_NOEXEC
-#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
-#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define __PAGE_KERNEL \
-	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
-#define __PAGE_KERNEL_EXEC \
-	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_NOCACHE \
-	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX)
-#define __PAGE_KERNEL_RO \
-	(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
-#define __PAGE_KERNEL_VSYSCALL \
-	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
-	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
-#define __PAGE_KERNEL_LARGE \
-	(__PAGE_KERNEL | _PAGE_PSE)
-#define __PAGE_KERNEL_LARGE_EXEC \
-	(__PAGE_KERNEL_EXEC | _PAGE_PSE)
-
-#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)
-
-#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
-#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
-
-/* xwr */
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY
-#define __P100	PAGE_READONLY_EXEC
-#define __P101	PAGE_READONLY_EXEC
-#define __P110	PAGE_COPY_EXEC
-#define __P111	PAGE_COPY_EXEC
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED
-#define __S100	PAGE_READONLY_EXEC
-#define __S101	PAGE_READONLY_EXEC
-#define __S110	PAGE_SHARED_EXEC
-#define __S111	PAGE_SHARED_EXEC
-
 #ifndef __ASSEMBLY__
 
 static inline unsigned long pgd_bad(pgd_t pgd)
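
Note: the 85 lines deleted above are the x86-64 copies of the _PAGE_* bit
masks and the PAGE_*/__PAGE_KERNEL* protection combinations; in a 32/64-bit
unification series they would move to a shared asm-x86 header rather than
disappear (an assumption here, since the destination file is not part of this
diff). The combinations are plain bitwise ORs of the single-bit flags, as this
standalone check using the exact values from the deleted lines shows:

    #include <assert.h>
    #include <stdio.h>

    /* Bit values copied from the deleted block above. */
    #define _PAGE_PRESENT  0x001
    #define _PAGE_RW       0x002
    #define _PAGE_ACCESSED 0x020
    #define _PAGE_DIRTY    0x040
    #define _PAGE_NX       (1UL << 63)  /* _PAGE_BIT_NX == 63 */

    /* __PAGE_KERNEL as in the deleted lines: writable, accessed/dirty
     * preset so hardware never faults to set them, non-executable. */
    #define __PAGE_KERNEL \
            (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)

    int main(void)
    {
        assert((__PAGE_KERNEL & 0xfff) == 0x063);  /* low flag bits */
        assert((__PAGE_KERNEL >> 63) == 1);        /* NX bit set */
        printf("__PAGE_KERNEL = %#lx\n", __PAGE_KERNEL);
        return 0;
    }
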
@@ -246,66 +164,16 @@ static inline unsigned long pmd_bad(pmd_t pmd)
 
 #define pte_none(x)	(!pte_val(x))
 #define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))	/* FIXME: is this
-						   right? */
+#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))   /* FIXME: is this right? */
 #define pte_page(x)	pfn_to_page(pte_pfn(x))
 #define pte_pfn(x)	((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
-static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-{
-	pte_t pte;
-	pte_val(pte) = (page_nr << PAGE_SHIFT);
-	pte_val(pte) |= pgprot_val(pgprot);
-	pte_val(pte) &= __supported_pte_mask;
-	return pte;
-}
-
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
-static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_RW; }
-static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_huge(pte_t pte)		{ return pte_val(pte) & _PAGE_PSE; }
-
-static inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
-static inline pte_t pte_mkexec(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_NX)); return pte; }
-static inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }
-static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_PSE)); return pte; }
-static inline pte_t pte_clrhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_PSE)); return pte; }
-
-struct vm_area_struct;
-
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-	if (!pte_young(*ptep))
-		return 0;
-	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
-}
-
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	clear_bit(_PAGE_BIT_RW, &ptep->pte);
-}
-
 /*
  * Macro to mark a page protection value as "uncacheable".
  */
 #define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
 
-static inline int pmd_large(pmd_t pte) {
-	return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
-}
-
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
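
Note: the surviving pages_to_mb() is right for the usual configuration,
FIXME notwithstanding: with 4 KB pages PAGE_SHIFT is 12, so
x >> (20 - PAGE_SHIFT) divides the page count by 2^8 = 256, the number of
4 KB pages in one megabyte. A quick standalone check:

    #include <assert.h>

    #define PAGE_SHIFT 12  /* 4 KB pages, the x86 base page size */
    #define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))

    int main(void)
    {
        assert(pages_to_mb(256UL) == 1);        /* 256 * 4 KB == 1 MB */
        assert(pages_to_mb(262144UL) == 1024);  /* 1 GB worth of pages */
        return 0;
    }
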
@@ -340,29 +208,18 @@ static inline int pmd_large(pmd_t pte) {
 			pmd_index(address))
 #define pmd_none(x)	(!pmd_val(x))
 #define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
-#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
 #define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
 #define pmd_pfn(x)  ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
 #define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
-#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
+#define pgoff_to_pte(off) ((pte_t) { .pte = ((off) << PAGE_SHIFT) | _PAGE_FILE })
 #define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
 
 /* PTE - Level 1 access. */
 
 /* page, protection -> pte */
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
-#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
 
-/* Change flags of a PTE */
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{
-	pte_val(pte) &= _PAGE_CHG_MASK;
-	pte_val(pte) |= pgprot_val(newprot);
-	pte_val(pte) &= __supported_pte_mask;
-	return pte;
-}
-
 #define pte_index(address) \
 		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_vaddr(*(dir)) + \
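
Note: the one non-deletion in this hunk is pgoff_to_pte() gaining a
designated initializer. On x86-64 pte_t is a single-member struct, so
{ x } and { .pte = x } are equivalent today, but naming the member keeps the
compound literal correct even if the struct layout changes later (plausibly
the point of this series; that reading is an inference, not stated in the
diff). A toy illustration with a stand-in struct:

    #include <stdio.h>

    /* Stand-in resembling the x86-64 pte_t: one unsigned long member. */
    typedef struct { unsigned long pte; } pte_t;

    #define _PAGE_FILE 0x040
    #define PAGE_SHIFT 12

    /* Designated form from the new side of the hunk: names the member,
     * so it stays valid regardless of member order. */
    #define pgoff_to_pte(off) \
            ((pte_t) { .pte = ((off) << PAGE_SHIFT) | _PAGE_FILE })

    int main(void)
    {
        pte_t p = pgoff_to_pte(5UL);
        printf("pte=%#lx\n", p.pte);  /* 0x5040 */
        return 0;
    }
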
@@ -376,40 +233,20 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 #define update_mmu_cache(vma,address,pte) do { } while (0)
 
-/* We only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPU's that might be updating the dirty
- * bit at the same time. */
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({									\
-	int __changed = !pte_same(*(__ptep), __entry);			\
-	if (__changed && __dirty) {					\
-		set_pte(__ptep, __entry);				\
-		flush_tlb_page(__vma, __address);			\
-	}								\
-	__changed;							\
-})
-
 /* Encode and de-code a swap entry */
 #define __swp_type(x)			(((x).val >> 1) & 0x3f)
 #define __swp_offset(x)			((x).val >> 8)
 #define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
 #define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
-
-extern spinlock_t pgd_lock;
-extern struct list_head pgd_list;
+#define __swp_entry_to_pte(x)		((pte_t) { .pte = (x).val })
 
 extern int kern_addr_valid(unsigned long addr);
 
-pte_t *lookup_address(unsigned long addr);
-
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
 		remap_pfn_range(vma, vaddr, pfn, size, prot)
 
 #define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
 #define pgtable_cache_init()   do { } while (0)
 #define check_pgt_cache()      do { } while (0)
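
Note: the swap-entry macros kept in this hunk pack the swap type into bits
1-6 and the offset from bit 8 upward, leaving bit 0 (_PAGE_PRESENT) clear so
a swapped-out PTE never looks present to the hardware. A self-contained
round trip of that layout, modelled directly on the macros above:

    #include <assert.h>

    typedef struct { unsigned long val; } swp_entry_t;

    /* Copied from the macros in the hunk above. */
    #define __swp_type(x)           (((x).val >> 1) & 0x3f)
    #define __swp_offset(x)         ((x).val >> 8)
    #define __swp_entry(type, off)  ((swp_entry_t) { ((type) << 1) | ((off) << 8) })

    int main(void)
    {
        swp_entry_t e = __swp_entry(3UL, 12345UL);
        assert(__swp_type(e) == 3);
        assert(__swp_offset(e) == 12345);
        assert((e.val & 1) == 0);  /* present bit stays clear */
        return 0;
    }
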
@@ -422,12 +259,7 @@ pte_t *lookup_address(unsigned long addr);
 #define kc_offset_to_vaddr(o) \
    (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
 
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
-#include <asm-generic/pgtable.h>
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _X86_64_PGTABLE_H */