author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/asm-x86_64/pgtable.h
refs       Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-x86_64/pgtable.h')
-rw-r--r--  include/asm-x86_64/pgtable.h  437
1 file changed, 437 insertions(+), 0 deletions(-)
diff --git a/include/asm-x86_64/pgtable.h b/include/asm-x86_64/pgtable.h
new file mode 100644
index 000000000000..dc6b6f2604e8
--- /dev/null
+++ b/include/asm-x86_64/pgtable.h
@@ -0,0 +1,437 @@
#ifndef _X86_64_PGTABLE_H
#define _X86_64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the x86-64 page table tree.
 */
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/bitops.h>
#include <linux/threads.h>
#include <asm/pda.h>

extern pud_t level3_kernel_pgt[512];
extern pud_t level3_physmem_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pgd_t init_level4_pgt[];
extern unsigned long __supported_pte_mask;

#define swapper_pg_dir init_level4_pgt

extern int nonx_setup(char *str);
extern void paging_init(void);
extern void clear_kernel_mapping(unsigned long addr, unsigned long size);

extern unsigned long pgkern_mask;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * PGDIR_SHIFT determines what a top-level page table entry can map
 */
#define PGDIR_SHIFT	39
#define PTRS_PER_PGD	512

/*
 * 3rd level page
 */
#define PUD_SHIFT	30
#define PTRS_PER_PUD	512

/*
 * PMD_SHIFT determines the size of the area a middle-level
 * page table can map
 */
#define PMD_SHIFT	21
#define PTRS_PER_PMD	512

/*
 * entries per page directory level
 */
#define PTRS_PER_PTE	512

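/*
 * Worked example (annotation, not part of the original commit): with
 * 4 KiB pages (PAGE_SHIFT == 12), each level decodes 9 bits of the
 * virtual address, so one entry at each level maps:
 *
 *	PTE:	1UL << 12  =   4 KiB
 *	PMD:	1UL << 21  =   2 MiB	(512 PTEs)
 *	PUD:	1UL << 30  =   1 GiB	(512 PMDs)
 *	PGD:	1UL << 39  = 512 GiB	(512 PUDs)
 *
 * 512 PGD entries thus cover the full 48-bit (256 TiB) virtual
 * address space.
 */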
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p(%016lx).\n", __FILE__, __LINE__, &(e), pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))

#define pgd_none(x)	(!pgd_val(x))
#define pud_none(x)	(!pud_val(x))

static inline void set_pte(pte_t *dst, pte_t val)
{
	pte_val(*dst) = pte_val(val);
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static inline void set_pmd(pmd_t *dst, pmd_t val)
{
	pmd_val(*dst) = pmd_val(val);
}

static inline void set_pud(pud_t *dst, pud_t val)
{
	pud_val(*dst) = pud_val(val);
}

extern inline void pud_clear (pud_t *pud)
{
	set_pud(pud, __pud(0));
}

static inline void set_pgd(pgd_t *dst, pgd_t val)
{
	pgd_val(*dst) = pgd_val(val);
}

extern inline void pgd_clear (pgd_t * pgd)
{
	set_pgd(pgd, __pgd(0));
}

#define pud_page(pud) \
	((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))

#define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte, 0))
#define pte_same(a, b)	((a).pte == (b).pte)

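/*
 * Annotation (not part of the original commit): ptep_get_and_clear()
 * uses xchg() rather than a plain load followed by a store so that a
 * dirty or accessed bit set by the hardware between the two steps
 * cannot be lost -- the pte is read and zeroed in a single atomic
 * operation.
 */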
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_PGD_NR	0

#ifndef __ASSEMBLY__
#define MAXMEM		0x3fffffffffffUL
#define VMALLOC_START	0xffffc20000000000UL
#define VMALLOC_END	0xffffe1ffffffffffUL
#define MODULES_VADDR	0xffffffff88000000UL
#define MODULES_END	0xfffffffffff00000UL
#define MODULES_LEN	(MODULES_END - MODULES_VADDR)

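/*
 * Annotation (not part of the original commit): these constants carve
 * up the kernel half of the 48-bit address space.  MAXMEM allows
 * 0x3fffffffffff + 1 = 2^46 = 64 TiB of physical memory, the vmalloc
 * area spans VMALLOC_END + 1 - VMALLOC_START = 2^45 = 32 TiB, and
 * MODULES_LEN works out to 0x77f00000 bytes, just under 2 GiB.
 */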
#define _PAGE_BIT_PRESENT	0
#define _PAGE_BIT_RW		1
#define _PAGE_BIT_USER		2
#define _PAGE_BIT_PWT		3
#define _PAGE_BIT_PCD		4
#define _PAGE_BIT_ACCESSED	5
#define _PAGE_BIT_DIRTY		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

#define _PAGE_PRESENT	0x001
#define _PAGE_RW	0x002
#define _PAGE_USER	0x004
#define _PAGE_PWT	0x008
#define _PAGE_PCD	0x010
#define _PAGE_ACCESSED	0x020
#define _PAGE_DIRTY	0x040
#define _PAGE_PSE	0x080	/* 2MB page */
#define _PAGE_FILE	0x040	/* set:pagecache, unset:swap */
#define _PAGE_GLOBAL	0x100	/* Global TLB entry */

#define _PAGE_PROTNONE	0x080	/* If not present */
#define _PAGE_NX	(1UL<<_PAGE_BIT_NX)

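/*
 * Annotation (not part of the original commit): each _PAGE_* flag is
 * simply 1 << its _PAGE_BIT_* number.  In a present 4 KiB pte, bits
 * 0-8 hold the flags above, bits 12-51 hold the physical page frame
 * number, and bit 63 is NX.  _PAGE_PROTNONE (0x080) reuses the PSE
 * bit and _PAGE_FILE (0x040) reuses the DIRTY bit; those overloaded
 * meanings apply only to entries that are not ordinary present ptes.
 */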
#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY PAGE_COPY_NOEXEC
#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define __PAGE_KERNEL \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define __PAGE_KERNEL_EXEC \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define __PAGE_KERNEL_NOCACHE \
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX)
#define __PAGE_KERNEL_RO \
	(_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
#define __PAGE_KERNEL_VSYSCALL \
	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
	(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
#define __PAGE_KERNEL_LARGE \
	(__PAGE_KERNEL | _PAGE_PSE)

#define MAKE_GLOBAL(x) __pgprot((x) | _PAGE_GLOBAL)

#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)

/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

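/*
 * Annotation (not part of the original commit): generic mm code
 * gathers the __P and __S entries into protection_map[] and indexes
 * it with the xwr (VM_READ/VM_WRITE/VM_EXEC) and VM_SHARED bits of a
 * vma, roughly:
 *
 *	pgprot_t prot = protection_map[vm_flags & 0x0f];
 *
 * so a PROT_READ|PROT_WRITE MAP_SHARED mapping resolves to __S011 ==
 * PAGE_SHARED, while the same MAP_PRIVATE mapping resolves to __P011
 * == PAGE_COPY: the page is mapped read-only and the first write
 * fault triggers copy-on-write.
 */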
static inline unsigned long pgd_bad(pgd_t pgd)
{
	unsigned long val = pgd_val(pgd);
	val &= ~PTE_MASK;
	val &= ~(_PAGE_USER | _PAGE_DIRTY);
	return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
}

static inline unsigned long pud_bad(pud_t pud)
{
	unsigned long val = pud_val(pud);
	val &= ~PTE_MASK;
	val &= ~(_PAGE_USER | _PAGE_DIRTY);
	return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
}

#define pte_none(x)	(!pte_val(x))
#define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))	/* FIXME: is this right? */
#define pte_page(x)	pfn_to_page(pte_pfn(x))
#define pte_pfn(x)	((pte_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK)

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (page_nr << PAGE_SHIFT);
	pte_val(pte) |= pgprot_val(pgprot);
	pte_val(pte) &= __supported_pte_mask;
	return pte;
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)	{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_read(pte_t pte)	{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_exec(pte_t pte)	{ return pte_val(pte) & _PAGE_USER; }
extern inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
extern inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
extern inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }

extern inline pte_t pte_rdprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
extern inline pte_t pte_exprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_USER)); return pte; }
extern inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
extern inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
extern inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_RW)); return pte; }
extern inline pte_t pte_mkread(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
extern inline pte_t pte_mkexec(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_USER)); return pte; }
extern inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
extern inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
extern inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_RW)); return pte; }

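/*
 * Usage sketch (annotation, not part of the original commit): the
 * pte_mk*() helpers take and return a pte_t by value, so callers
 * follow a read-modify-write pattern, e.g. to mark a present pte
 * dirty and young:
 *
 *	pte_t pte = *ptep;
 *	if (pte_present(pte))
 *		set_pte(ptep, pte_mkdirty(pte_mkyoung(pte)));
 */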
struct vm_area_struct;

static inline int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (!pte_dirty(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_BIT_DIRTY, ptep);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, ptep);
}

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))

#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
static inline int pmd_large(pmd_t pte) {
	return (pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
}


/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define page_pte(page) page_pte_prot(page, __pgprot(0))

/*
 * Level 4 access.
 */
#define pgd_page(pgd) ((unsigned long) __va((unsigned long)pgd_val(pgd) & PTE_MASK))
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
#define pgd_present(pgd) (pgd_val(pgd) & _PAGE_PRESENT)
#define mk_kernel_pgd(address) ((pgd_t){ (address) | _KERNPG_TABLE })

/* PUD - Level3 access */
/* to find an entry in a page-table-directory. */
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
#define pud_offset_k(pgd, addr) pud_offset(pgd, addr)
#define pud_present(pud) (pud_val(pud) & _PAGE_PRESENT)

static inline pud_t *__pud_offset_k(pud_t *pud, unsigned long address)
{
	return pud + pud_index(address);
}

/* PMD - Level 2 access */
#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
#define pmd_page(pmd)	(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
			pmd_index(address))
#define pmd_none(x)	(!pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x)	((pmd_val(x) & (~PTE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE )
#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
#define pmd_pfn(x)	((pmd_val(x) >> PAGE_SHIFT) & __PHYSICAL_MASK)

#define pte_to_pgoff(pte) ((pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT

/* PTE - Level 1 access. */

/* page, protection -> pte */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_huge(entry) (pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)

/* physical address -> PTE */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = physpage | pgprot_val(pgprot);
	return pte;
}

/* Change flags of a PTE */
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	pte_val(pte) &= __supported_pte_mask;
	return pte;
}

#define pte_index(address) \
		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
			pte_index(address))

/* x86-64 always has all page tables mapped. */
#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
#define pte_unmap(pte) /* NOP */
#define pte_unmap_nested(pte) /* NOP */

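/*
 * Walk sketch (annotation, not part of the original commit): looking
 * up the pte for a kernel virtual address strings the per-level
 * offset helpers above together, e.g.:
 *
 *	pte_t *lookup_address(unsigned long address)
 *	{
 *		pgd_t *pgd = pgd_offset_k(address);
 *		pud_t *pud;
 *		pmd_t *pmd;
 *
 *		if (pgd_none(*pgd))
 *			return NULL;
 *		pud = pud_offset(pgd, address);
 *		if (pud_none(*pud))
 *			return NULL;
 *		pmd = pmd_offset(pud, address);
 *		if (pmd_none(*pmd) || pmd_large(*pmd))
 *			return NULL;	// no pte level under a 2MB page
 *		return pte_offset_kernel(pmd, address);
 *	}
 */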
#define update_mmu_cache(vma,address,pte) do { } while (0)

/* We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time. */
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
	do {								  \
		if (__dirty) {						  \
			set_pte(__ptep, __entry);			  \
			flush_tlb_page(__vma, __address);		  \
		}							  \
	} while (0)

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val >> 1) & 0x3f)
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

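/*
 * Annotation (not part of the original commit): a swapped-out pte is
 * never marked present, which frees its bits for the swap entry:
 *
 *	bit  0     : 0 (_PAGE_PRESENT clear)
 *	bits 1..6  : swap type (up to 64 swap areas)
 *	bits 8..   : page offset within the swap area
 *
 * e.g. __swp_entry(2, 0x1234) yields the value (2 << 1) | (0x1234 << 8).
 */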
#endif /* !__ASSEMBLY__ */

extern int kern_addr_valid(unsigned long addr);

#define io_remap_page_range(vma, vaddr, paddr, size, prot)		\
		remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)			\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

#define HAVE_ARCH_UNMAPPED_AREA

#define pgtable_cache_init()	do { } while (0)
#define check_pgt_cache()	do { } while (0)

#define PAGE_AGP	PAGE_KERNEL_NOCACHE
#define HAVE_PAGE_AGP 1

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
#define kc_offset_to_vaddr(o) \
	(((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _X86_64_PGTABLE_H */