author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d	/include/asm-ia64/pgtable.h
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-ia64/pgtable.h')
-rw-r--r--	include/asm-ia64/pgtable.h	593
1 files changed, 593 insertions, 0 deletions
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
new file mode 100644
index 00000000000..1757a811f43
--- /dev/null
+++ b/include/asm-ia64/pgtable.h
@@ -0,0 +1,593 @@
#ifndef _ASM_IA64_PGTABLE_H
#define _ASM_IA64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the IA-64 page table tree.
 *
 * This hopefully works with any (fixed) IA-64 page-size, as defined
 * in <asm/page.h>.
 *
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/config.h>

#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/types.h>

#define IA64_MAX_PHYS_BITS	50	/* max. number of physical address bits (architected) */

/*
 * First, define the various bits in a PTE.  Note that the PTE format
 * matches the VHPT short format, the first doubleword of the VHPT long
 * format, and the first doubleword of the TLB insertion format.
 */
#define _PAGE_P_BIT		0
#define _PAGE_A_BIT		5
#define _PAGE_D_BIT		6

#define _PAGE_P			(1 << _PAGE_P_BIT)	/* page present bit */
#define _PAGE_MA_WB		(0x0 <<  2)	/* write back memory attribute */
#define _PAGE_MA_UC		(0x4 <<  2)	/* uncacheable memory attribute */
#define _PAGE_MA_UCE		(0x5 <<  2)	/* UC exported attribute */
#define _PAGE_MA_WC		(0x6 <<  2)	/* write coalescing memory attribute */
#define _PAGE_MA_NAT		(0x7 <<  2)	/* not-a-thing attribute */
#define _PAGE_MA_MASK		(0x7 <<  2)
#define _PAGE_PL_0		(0 <<  7)	/* privilege level 0 (kernel) */
#define _PAGE_PL_1		(1 <<  7)	/* privilege level 1 (unused) */
#define _PAGE_PL_2		(2 <<  7)	/* privilege level 2 (unused) */
#define _PAGE_PL_3		(3 <<  7)	/* privilege level 3 (user) */
#define _PAGE_PL_MASK		(3 <<  7)
#define _PAGE_AR_R		(0 <<  9)	/* read only */
#define _PAGE_AR_RX		(1 <<  9)	/* read & execute */
#define _PAGE_AR_RW		(2 <<  9)	/* read & write */
#define _PAGE_AR_RWX		(3 <<  9)	/* read, write & execute */
#define _PAGE_AR_R_RW		(4 <<  9)	/* read / read & write */
#define _PAGE_AR_RX_RWX		(5 <<  9)	/* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW		(6 <<  9)	/* read, write & exec / read & write */
#define _PAGE_AR_X_RX		(7 <<  9)	/* exec & promote / read & exec */
#define _PAGE_AR_MASK		(7 <<  9)
#define _PAGE_AR_SHIFT		9
#define _PAGE_A			(1 << _PAGE_A_BIT)	/* page accessed bit */
#define _PAGE_D			(1 << _PAGE_D_BIT)	/* page dirty bit */
#define _PAGE_PPN_MASK		(((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED		(__IA64_UL(1) << 52)	/* exception deferral */
#define _PAGE_PROTNONE		(__IA64_UL(1) << 63)

/* Valid only for a PTE with the present bit cleared: */
#define _PAGE_FILE		(1 << 1)	/* see swap & file pte remarks below */

#define _PFN_MASK		_PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK	(_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)

#define _PAGE_SIZE_4K	12
#define _PAGE_SIZE_8K	13
#define _PAGE_SIZE_16K	14
#define _PAGE_SIZE_64K	16
#define _PAGE_SIZE_256K	18
#define _PAGE_SIZE_1M	20
#define _PAGE_SIZE_4M	22
#define _PAGE_SIZE_16M	24
#define _PAGE_SIZE_64M	26
#define _PAGE_SIZE_256M	28
#define _PAGE_SIZE_1G	30
#define _PAGE_SIZE_4G	32

#define __ACCESS_BITS		_PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
#define __DIRTY_BITS_NO_ED	_PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
#define __DIRTY_BITS		_PAGE_ED | __DIRTY_BITS_NO_ED

/*
 * Definitions for first level:
 *
 * PGDIR_SHIFT determines what a first-level page table entry can map.
 */
#define PGDIR_SHIFT		(PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE		(__IA64_UL(1) << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD		(1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD	(5*PTRS_PER_PGD/8)	/* regions 0-4 are user regions */
#define FIRST_USER_PGD_NR	0

/*
 * Definitions for second level:
 *
 * PMD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PTRS_PER_PMD	(1UL << (PAGE_SHIFT-3))

/*
 * Definitions for third level:
 */
#define PTRS_PER_PTE	(__IA64_UL(1) << (PAGE_SHIFT-3))
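
/*
 * A standalone sketch (not from the original file): evaluating the level
 * geometry above for an assumed PAGE_SHIFT of 14 (16KB pages).  Local
 * SKETCH_* stand-in macros; values are illustrative only.
 */
#include <stdio.h>

#define SKETCH_PAGE_SHIFT	14
#define SKETCH_PMD_SHIFT	(SKETCH_PAGE_SHIFT + (SKETCH_PAGE_SHIFT - 3))	/* 25 */
#define SKETCH_PGDIR_SHIFT	(SKETCH_PAGE_SHIFT + 2*(SKETCH_PAGE_SHIFT - 3))	/* 36 */

int main(void)
{
	/* each level holds 2^(PAGE_SHIFT-3) eight-byte entries */
	printf("entries per level: %lu\n", 1UL << (SKETCH_PAGE_SHIFT - 3));	/* 2048 */
	printf("PMD entry maps   : %lu MB\n", (1UL << SKETCH_PMD_SHIFT) >> 20);	/* 32 */
	printf("PGD entry maps   : %lu GB\n", (1UL << SKETCH_PGDIR_SHIFT) >> 30);	/* 64 */
	return 0;
}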
113 | |||
114 | /* | ||
115 | * All the normal masks have the "page accessed" bits on, as any time | ||
116 | * they are used, the page is accessed. They are cleared only by the | ||
117 | * page-out routines. | ||
118 | */ | ||
119 | #define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_A) | ||
120 | #define PAGE_SHARED __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW) | ||
121 | #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) | ||
122 | #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) | ||
123 | #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) | ||
124 | #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) | ||
125 | #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) | ||
126 | #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX) | ||
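
/*
 * A standalone sketch (not from the original file): composing PAGE_KERNEL
 * from the bit definitions above and checking the expected raw value.
 * Local stand-in constants; illustrative only.
 */
#include <assert.h>

int main(void)
{
	unsigned long p = 1UL << 0, a = 1UL << 5, d = 1UL << 6;	/* _PAGE_P/_PAGE_A/_PAGE_D */
	unsigned long ma_wb = 0UL << 2;				/* write-back: all-zero attribute */
	unsigned long pl_0 = 0UL << 7, ar_rwx = 3UL << 9;	/* kernel PL, read/write/exec */
	unsigned long ed = 1UL << 52;				/* exception deferral */

	unsigned long page_kernel = (ed | a | p | d | ma_wb)	/* __DIRTY_BITS */
				    | pl_0 | ar_rwx;

	assert(page_kernel == ((1UL << 52) | 0x661UL));	/* P|A|D = 0x61, AR_RWX = 0x600 */
	return 0;
}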
127 | |||
128 | # ifndef __ASSEMBLY__ | ||
129 | |||
130 | #include <asm/bitops.h> | ||
131 | #include <asm/cacheflush.h> | ||
132 | #include <asm/mmu_context.h> | ||
133 | #include <asm/processor.h> | ||
134 | |||
135 | /* | ||
136 | * Next come the mappings that determine how mmap() protection bits | ||
137 | * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented. The | ||
138 | * _P version gets used for a private shared memory segment, the _S | ||
139 | * version gets used for a shared memory segment with MAP_SHARED on. | ||
140 | * In a private shared memory segment, we do a copy-on-write if a task | ||
141 | * attempts to write to the page. | ||
142 | */ | ||
143 | /* xwr */ | ||
144 | #define __P000 PAGE_NONE | ||
145 | #define __P001 PAGE_READONLY | ||
146 | #define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */ | ||
147 | #define __P011 PAGE_READONLY /* ditto */ | ||
148 | #define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX) | ||
149 | #define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) | ||
150 | #define __P110 PAGE_COPY_EXEC | ||
151 | #define __P111 PAGE_COPY_EXEC | ||
152 | |||
153 | #define __S000 PAGE_NONE | ||
154 | #define __S001 PAGE_READONLY | ||
155 | #define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */ | ||
156 | #define __S011 PAGE_SHARED | ||
157 | #define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX) | ||
158 | #define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) | ||
159 | #define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX) | ||
160 | #define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX) | ||
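
/*
 * Sketch of how the mm core consumes the table above: mm/mmap.c fills
 * protection_map[16] with these macros and indexes it with the low VMA
 * flag bits (VM_READ=1, VM_WRITE=2, VM_EXEC=4, VM_SHARED=8).  Simplified
 * illustration with sketch_* names, not the exact kernel code.
 */
pgprot_t sketch_protection_map[16] = {
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

/* lookup used when a VMA's vm_page_prot is first set: */
#define sketch_vm_prot(vm_flags)	sketch_protection_map[(vm_flags) & 0x0f]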
161 | |||
162 | #define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) | ||
163 | #define pmd_ERROR(e) printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) | ||
164 | #define pte_ERROR(e) printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) | ||
165 | |||
166 | |||
167 | /* | ||
168 | * Some definitions to translate between mem_map, PTEs, and page addresses: | ||
169 | */ | ||
170 | |||
171 | |||
172 | /* Quick test to see if ADDR is a (potentially) valid physical address. */ | ||
173 | static inline long | ||
174 | ia64_phys_addr_valid (unsigned long addr) | ||
175 | { | ||
176 | return (addr & (local_cpu_data->unimpl_pa_mask)) == 0; | ||
177 | } | ||
178 | |||
/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for IA-64.
 */
#define kern_addr_valid(addr)	(1)


/*
 * Now come the defines and routines to manage and access the three-level
 * page table.
 */

/*
 * On some architectures, special things need to be done when setting
 * the PTE in a page table.  Nothing special needs to be done on IA-64.
 */
#define set_pte(ptep, pteval)	(*(ptep) = (pteval))
#define set_pte_at(mm,addr,ptep,pteval)	set_pte(ptep,pteval)

#define RGN_SIZE	(1UL << 61)
#define RGN_KERNEL	7

#define VMALLOC_START		0xa000000200000000UL
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT	(0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
# define VMALLOC_END		vmalloc_end
  extern unsigned long vmalloc_end;
#else
# define VMALLOC_END		(0xa000000000000000UL + (1UL << (4*PAGE_SHIFT - 9)))
#endif
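
/*
 * A standalone sketch (not from the original file): the size of the
 * vmalloc window implied by the VMALLOC_END formula above, assuming
 * PAGE_SHIFT is 14.  Illustrative only.
 */
#include <stdio.h>

int main(void)
{
	unsigned long start = 0xa000000200000000UL;			/* VMALLOC_START */
	unsigned long end = 0xa000000000000000UL + (1UL << (4*14 - 9));	/* 4*PAGE_SHIFT-9 = 47 */

	printf("vmalloc window: %lu GB\n", (end - start) >> 30);	/* 2^47 minus 8GB */
	return 0;
}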
218 | |||
219 | /* fs/proc/kcore.c */ | ||
220 | #define kc_vaddr_to_offset(v) ((v) - 0xa000000000000000UL) | ||
221 | #define kc_offset_to_vaddr(o) ((o) + 0xa000000000000000UL) | ||
222 | |||
223 | /* | ||
224 | * Conversion functions: convert page frame number (pfn) and a protection value to a page | ||
225 | * table entry (pte). | ||
226 | */ | ||
227 | #define pfn_pte(pfn, pgprot) \ | ||
228 | ({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; }) | ||
229 | |||
230 | /* Extract pfn from pte. */ | ||
231 | #define pte_pfn(_pte) ((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT) | ||
232 | |||
233 | #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) | ||
234 | |||
235 | /* This takes a physical page address that is used by the remapping functions */ | ||
236 | #define mk_pte_phys(physpage, pgprot) \ | ||
237 | ({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; }) | ||
238 | |||
239 | #define pte_modify(_pte, newprot) \ | ||
240 | (__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK))) | ||
241 | |||
242 | #define page_pte_prot(page,prot) mk_pte(page, prot) | ||
243 | #define page_pte(page) page_pte_prot(page, __pgprot(0)) | ||
244 | |||
245 | #define pte_none(pte) (!pte_val(pte)) | ||
246 | #define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE)) | ||
247 | #define pte_clear(mm,addr,pte) (pte_val(*(pte)) = 0UL) | ||
248 | /* pte_page() returns the "struct page *" corresponding to the PTE: */ | ||
249 | #define pte_page(pte) virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET)) | ||
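
/*
 * A standalone sketch (not from the original file): pfn_pte() and
 * pte_pfn() invert each other.  Modeled on plain unsigned longs with
 * PAGE_SHIFT assumed to be 14; illustrative only.
 */
#include <assert.h>

int main(void)
{
	unsigned long pfn_mask = ((1UL << 50) - 1) & ~0xfffUL;	/* _PFN_MASK */
	unsigned long pfn  = 0x12345;
	unsigned long prot = 0x661;				/* e.g. P|A|D + AR_RWX */
	unsigned long pte  = (pfn << 14) | prot;		/* pfn_pte() */

	/* pte_pfn(): mask off the attribute bits, shift the ppn back down */
	assert(((pte & pfn_mask) >> 14) == pfn);
	return 0;
}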
250 | |||
251 | #define pmd_none(pmd) (!pmd_val(pmd)) | ||
252 | #define pmd_bad(pmd) (!ia64_phys_addr_valid(pmd_val(pmd))) | ||
253 | #define pmd_present(pmd) (pmd_val(pmd) != 0UL) | ||
254 | #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL) | ||
255 | #define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK)) | ||
256 | #define pmd_page(pmd) virt_to_page((pmd_val(pmd) + PAGE_OFFSET)) | ||
257 | |||
258 | #define pud_none(pud) (!pud_val(pud)) | ||
259 | #define pud_bad(pud) (!ia64_phys_addr_valid(pud_val(pud))) | ||
260 | #define pud_present(pud) (pud_val(pud) != 0UL) | ||
261 | #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL) | ||
262 | |||
263 | #define pud_page(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK)) | ||
264 | |||
/*
 * The following have well-defined behavior only if pte_present() is true.
 */
#define pte_user(pte)		((pte_val(pte) & _PAGE_PL_MASK) == _PAGE_PL_3)
#define pte_read(pte)		(((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
#define pte_write(pte)	((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte)		((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte)		((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte)		((pte_val(pte) & _PAGE_A) != 0)
#define pte_file(pte)		((pte_val(pte) & _PAGE_FILE) != 0)
/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
 * access rights:
 */
#define pte_wrprotect(pte)	(__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte)	(__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkexec(pte)		(__pte(pte_val(pte) | _PAGE_AR_RX))
#define pte_mkold(pte)		(__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte)	(__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte)	(__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte)	(__pte(pte_val(pte) | _PAGE_D))
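
/*
 * A standalone sketch (not from the original file) checking the comment
 * above: clearing the _PAGE_AR_RW bit (2 in the 3-bit AR field) turns
 * RWX (3) into RX (1) and RW (2) into R (0).  Illustrative only.
 */
#include <assert.h>

int main(void)
{
	unsigned long ar_rw = 2UL << 9;			/* _PAGE_AR_RW */

	assert(((3UL << 9) & ~ar_rw) == (1UL << 9));	/* RWX -> RX */
	assert(((2UL << 9) & ~ar_rw) == (0UL << 9));	/* RW  -> R  */
	return 0;
}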
286 | |||
287 | /* | ||
288 | * Macro to a page protection value as "uncacheable". Note that "protection" is really a | ||
289 | * misnomer here as the protection value contains the memory attribute bits, dirty bits, | ||
290 | * and various other bits as well. | ||
291 | */ | ||
292 | #define pgprot_noncached(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC) | ||
293 | |||
/*
 * Macro to mark a page protection value as "write-combining".
 * Note that "protection" is really a misnomer here as the protection
 * value contains the memory attribute bits, dirty bits, and various
 * other bits as well.  Accesses through a write-combining translation
 * bypass the caches, but do allow consecutive writes to be
 * combined into single (but larger) write transactions.
 */
#define pgprot_writecombine(prot)	__pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
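
/*
 * Sketch of a typical consumer: a hypothetical driver's mmap handler
 * rewriting vm_page_prot before mapping device registers to user space.
 * Pattern only, not a complete driver; mydev_phys_base is an assumed
 * physical base address, not a real symbol.
 */
static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/* device registers must not be cached */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			       mydev_phys_base >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}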
303 | |||
304 | static inline unsigned long | ||
305 | pgd_index (unsigned long address) | ||
306 | { | ||
307 | unsigned long region = address >> 61; | ||
308 | unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1); | ||
309 | |||
310 | return (region << (PAGE_SHIFT - 6)) | l1index; | ||
311 | } | ||
312 | |||
313 | /* The offset in the 1-level directory is given by the 3 region bits | ||
314 | (61..63) and the level-1 bits. */ | ||
315 | static inline pgd_t* | ||
316 | pgd_offset (struct mm_struct *mm, unsigned long address) | ||
317 | { | ||
318 | return mm->pgd + pgd_index(address); | ||
319 | } | ||
320 | |||
321 | /* In the kernel's mapped region we completely ignore the region number | ||
322 | (since we know it's in region number 5). */ | ||
323 | #define pgd_offset_k(addr) \ | ||
324 | (init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))) | ||
325 | |||
326 | /* Look up a pgd entry in the gate area. On IA-64, the gate-area | ||
327 | resides in the kernel-mapped segment, hence we use pgd_offset_k() | ||
328 | here. */ | ||
329 | #define pgd_offset_gate(mm, addr) pgd_offset_k(addr) | ||
330 | |||
331 | /* Find an entry in the second-level page table.. */ | ||
332 | #define pmd_offset(dir,addr) \ | ||
333 | ((pmd_t *) pud_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))) | ||
334 | |||
335 | /* | ||
336 | * Find an entry in the third-level page table. This looks more complicated than it | ||
337 | * should be because some platforms place page tables in high memory. | ||
338 | */ | ||
339 | #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | ||
340 | #define pte_offset_kernel(dir,addr) ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr)) | ||
341 | #define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr) | ||
342 | #define pte_offset_map_nested(dir,addr) pte_offset_map(dir, addr) | ||
343 | #define pte_unmap(pte) do { } while (0) | ||
344 | #define pte_unmap_nested(pte) do { } while (0) | ||
345 | |||
/* atomic versions of some of the PTE manipulations: */

static inline int
ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
#endif
}

static inline int
ptep_test_and_clear_dirty (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_dirty(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_D_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
	return 1;
#endif
}

static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	return __pte(xchg((long *) ptep, 0));
#else
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
#endif
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte (old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;
	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}

static inline int
pte_same (pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#define update_mmu_cache(vma, address, pte) do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);
418 | |||
419 | /* | ||
420 | * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of | ||
421 | * bits in the swap-type field of the swap pte. It would be nice to | ||
422 | * enforce that, but we can't easily include <linux/swap.h> here. | ||
423 | * (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...). | ||
424 | * | ||
425 | * Format of swap pte: | ||
426 | * bit 0 : present bit (must be zero) | ||
427 | * bit 1 : _PAGE_FILE (must be zero) | ||
428 | * bits 2- 8: swap-type | ||
429 | * bits 9-62: swap offset | ||
430 | * bit 63 : _PAGE_PROTNONE bit | ||
431 | * | ||
432 | * Format of file pte: | ||
433 | * bit 0 : present bit (must be zero) | ||
434 | * bit 1 : _PAGE_FILE (must be one) | ||
435 | * bits 2-62: file_offset/PAGE_SIZE | ||
436 | * bit 63 : _PAGE_PROTNONE bit | ||
437 | */ | ||
#define __swp_type(entry)		(((entry).val >> 2) & 0x7f)
#define __swp_offset(entry)		(((entry).val << 1) >> 10)
#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#define PTE_FILE_MAX_BITS		61
#define pte_to_pgoff(pte)		((pte_val(pte) << 1) >> 3)
#define pgoff_to_pte(off)		((pte_t) { ((off) << 2) | _PAGE_FILE })
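
/*
 * A standalone sketch (not from the original file): round-tripping the
 * swap-pte encoding documented above (type in bits 2-8, offset in bits
 * 9-62), on plain unsigned longs.  Illustrative only; assumes the
 * offset fits in 54 bits.
 */
#include <assert.h>

int main(void)
{
	unsigned long type   = 0x15;				/* fits in 7 bits */
	unsigned long offset = 0x123456;
	unsigned long val    = (type << 2) | (offset << 9);	/* __swp_entry()  */

	assert(((val >> 2) & 0x7f) == type);			/* __swp_type()   */
	assert(((val << 1) >> 10) == offset);			/* __swp_offset(): drop bit 63 first */
	return 0;
}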
447 | |||
448 | /* XXX is this right? */ | ||
449 | #define io_remap_page_range(vma, vaddr, paddr, size, prot) \ | ||
450 | remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot) | ||
451 | |||
452 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ | ||
453 | remap_pfn_range(vma, vaddr, pfn, size, prot) | ||
454 | |||
455 | #define MK_IOSPACE_PFN(space, pfn) (pfn) | ||
456 | #define GET_IOSPACE(pfn) 0 | ||
457 | #define GET_PFN(pfn) (pfn) | ||
458 | |||
459 | /* | ||
460 | * ZERO_PAGE is a global shared page that is always zero: used | ||
461 | * for zero-mapped memory areas etc.. | ||
462 | */ | ||
463 | extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; | ||
464 | extern struct page *zero_page_memmap_ptr; | ||
465 | #define ZERO_PAGE(vaddr) (zero_page_memmap_ptr) | ||
466 | |||
467 | /* We provide our own get_unmapped_area to cope with VA holes for userland */ | ||
468 | #define HAVE_ARCH_UNMAPPED_AREA | ||
469 | |||
470 | #ifdef CONFIG_HUGETLB_PAGE | ||
471 | #define HUGETLB_PGDIR_SHIFT (HPAGE_SHIFT + 2*(PAGE_SHIFT-3)) | ||
472 | #define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT) | ||
473 | #define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1)) | ||
474 | struct mmu_gather; | ||
475 | extern void hugetlb_free_pgtables(struct mmu_gather *tlb, | ||
476 | struct vm_area_struct * prev, unsigned long start, unsigned long end); | ||
477 | #endif | ||
478 | |||
479 | /* | ||
480 | * IA-64 doesn't have any external MMU info: the page tables contain all the necessary | ||
481 | * information. However, we use this routine to take care of any (delayed) i-cache | ||
482 | * flushing that may be necessary. | ||
483 | */ | ||
484 | extern void lazy_mmu_prot_update (pte_t pte); | ||
485 | |||
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update PTEP with ENTRY, which is guaranteed to be a less
 * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
 * WRITABLE bits turned on, when the value at PTEP did not.  The
 * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
 *
 * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
 * having to worry about races.  On SMP machines, there are only two
 * cases where this is true:
 *
 *	(1) *PTEP has the PRESENT bit turned OFF
 *	(2) ENTRY has the DIRTY bit turned ON
 *
 * On ia64, we could implement this routine with a cmpxchg()-loop
 * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
 * However, like on x86, we can get a more streamlined version by
 * observing that it is OK to drop ACCESSED bit updates when
 * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
 * result in an extra Access-bit fault, which would then turn on the
 * ACCESSED bit in the low-level fault handler (iaccess_bit or
 * daccess_bit in ivt.S).
 */
#ifdef CONFIG_SMP
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable)	\
do {											\
	if (__safely_writable) {							\
		set_pte(__ptep, __entry);						\
		flush_tlb_page(__vma, __addr);						\
	}										\
} while (0)
#else
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable)	\
	ptep_establish(__vma, __addr, __ptep, __entry)
#endif

# ifdef CONFIG_VIRTUAL_MEM_MAP
  /* arch mem_map init routine is needed due to holes in a virtual mem_map */
# define __HAVE_ARCH_MEMMAP_INIT
  extern void memmap_init (unsigned long size, int nid, unsigned long zone,
			   unsigned long start_pfn);
# endif /* CONFIG_VIRTUAL_MEM_MAP */
# endif /* !__ASSEMBLY__ */
529 | |||
530 | /* | ||
531 | * Identity-mapped regions use a large page size. We'll call such large pages | ||
532 | * "granules". If you can think of a better name that's unambiguous, let me | ||
533 | * know... | ||
534 | */ | ||
535 | #if defined(CONFIG_IA64_GRANULE_64MB) | ||
536 | # define IA64_GRANULE_SHIFT _PAGE_SIZE_64M | ||
537 | #elif defined(CONFIG_IA64_GRANULE_16MB) | ||
538 | # define IA64_GRANULE_SHIFT _PAGE_SIZE_16M | ||
539 | #endif | ||
540 | #define IA64_GRANULE_SIZE (1 << IA64_GRANULE_SHIFT) | ||
541 | /* | ||
542 | * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL): | ||
543 | */ | ||
544 | #define KERNEL_TR_PAGE_SHIFT _PAGE_SIZE_64M | ||
545 | #define KERNEL_TR_PAGE_SIZE (1 << KERNEL_TR_PAGE_SHIFT) | ||
546 | |||
547 | /* | ||
548 | * No page table caches to initialise | ||
549 | */ | ||
550 | #define pgtable_cache_init() do { } while (0) | ||
551 | |||
552 | /* These tell get_user_pages() that the first gate page is accessible from user-level. */ | ||
553 | #define FIXADDR_USER_START GATE_ADDR | ||
554 | #define FIXADDR_USER_END (GATE_ADDR + 2*PERCPU_PAGE_SIZE) | ||
555 | |||
556 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | ||
557 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY | ||
558 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | ||
559 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | ||
560 | #define __HAVE_ARCH_PTE_SAME | ||
561 | #define __HAVE_ARCH_PGD_OFFSET_GATE | ||
562 | #define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE | ||
563 | |||
/*
 * Override for pgd_addr_end() to deal with the virtual address space holes
 * in each region.  In regions 0..4 virtual address bits are used like this:
 *	+--------+------+--------+-----+-----+--------+
 *	| pgdhi3 | rsvd | pgdlow | pmd | pte | offset |
 *	+--------+------+--------+-----+-----+--------+
 * 'pgdlow' overflows to pgdhi3 (a.k.a. region bits) leaving rsvd==0
 */
#define IA64_PGD_OVERFLOW	(PGDIR_SIZE << (PAGE_SHIFT-6))

#define pgd_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK;	\
	if (REGION_NUMBER(__boundary) < 5 &&				\
	    __boundary & IA64_PGD_OVERFLOW)				\
		__boundary += (RGN_SIZE - 1) & ~(IA64_PGD_OVERFLOW - 1);\
	(__boundary - 1 < (end) - 1)? __boundary: (end);		\
})
581 | |||
582 | #define pmd_addr_end(addr, end) \ | ||
583 | ({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \ | ||
584 | if (REGION_NUMBER(__boundary) < 5 && \ | ||
585 | __boundary & IA64_PGD_OVERFLOW) \ | ||
586 | __boundary += (RGN_SIZE - 1) & ~(IA64_PGD_OVERFLOW - 1);\ | ||
587 | (__boundary - 1 < (end) - 1)? __boundary: (end); \ | ||
588 | }) | ||
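
/*
 * Sketch of the walk pattern these helpers plug into, modeled on the
 * generic mm walkers (simplified; sketch_walk_pmd_range() is a
 * hypothetical helper, not kernel API).
 */
static void sketch_walk_range(struct mm_struct *mm, unsigned long addr,
			      unsigned long end)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	unsigned long next;

	do {
		/* pgd_addr_end() clamps to the next pgd boundary and, on
		 * ia64, jumps past the unimplemented hole in each region */
		next = pgd_addr_end(addr, end);
		if (!pgd_none(*pgd))
			sketch_walk_pmd_range(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}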
589 | |||
590 | #include <asm-generic/pgtable-nopud.h> | ||
591 | #include <asm-generic/pgtable.h> | ||
592 | |||
593 | #endif /* _ASM_IA64_PGTABLE_H */ | ||