author     Kyle McMartin <kyle@mcmartin.ca>       2008-07-28 23:02:13 -0400
committer  Kyle McMartin <kyle@hera.kernel.org>   2008-10-10 12:32:29 -0400
commit     deae26bf6a10e47983606f5df080b91e97650ead (patch)
tree       84a8a68145d0f713d7c5a1f9e6b3b03be9b3a4c8 /include/asm-parisc/pgtable.h
parent     6c86cb8237bf08443806089130dc108051569a93 (diff)

parisc: move include/asm-parisc to arch/parisc/include/asm

Diffstat (limited to 'include/asm-parisc/pgtable.h')

 include/asm-parisc/pgtable.h | 508 ----------------------------------
 1 file changed, 0 insertions(+), 508 deletions(-)

diff --git a/include/asm-parisc/pgtable.h b/include/asm-parisc/pgtable.h
deleted file mode 100644
index 470a4b88124d..000000000000
--- a/include/asm-parisc/pgtable.h
+++ /dev/null
@@ -1,508 +0,0 @@
#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H

#include <asm-generic/4level-fixup.h>

#include <asm/fixmap.h>

#ifndef __ASSEMBLY__
/*
 * we simulate an x86-style page table for the linux mm code
 */

#include <linux/mm.h>		/* for vm_area_struct */
#include <linux/bitops.h>
#include <asm/processor.h>
#include <asm/cache.h>

/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for parisc.
 */
#define kern_addr_valid(addr)	(1)

/* Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)			\
	do {					\
		*(pteptr) = (pteval);		\
	} while (0)
#define set_pte_at(mm, addr, ptep, pteval)	set_pte(ptep, pteval)

#endif /* !__ASSEMBLY__ */

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))

/* This is the size of the initially mapped kernel memory */
#ifdef CONFIG_64BIT
#define KERNEL_INITIAL_ORDER	24	/* 0 to 1<<24 = 16MB */
#else
#define KERNEL_INITIAL_ORDER	23	/* 0 to 1<<23 = 8MB */
#endif
#define KERNEL_INITIAL_SIZE	(1 << KERNEL_INITIAL_ORDER)

#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
#define PT_NLEVELS	3
#define PGD_ORDER	1	/* Number of pages per pgd */
#define PMD_ORDER	1	/* Number of pages per pmd */
#define PGD_ALLOC_ORDER	2	/* first pgd contains pmd */
#else
#define PT_NLEVELS	2
#define PGD_ORDER	1	/* Number of pages per pgd */
#define PGD_ALLOC_ORDER	PGD_ORDER
#endif

/* Definitions for 3rd level (we use PLD here for Page Lower directory
 * because PTE_SHIFT is used lower down to mean shift that has to be
 * done to get usable bits out of the PTE) */
#define PLD_SHIFT	PAGE_SHIFT
#define PLD_SIZE	PAGE_SIZE
#define BITS_PER_PTE	(PAGE_SHIFT - BITS_PER_PTE_ENTRY)
#define PTRS_PER_PTE	(1UL << BITS_PER_PTE)

/* Definitions for 2nd level */
#define pgtable_cache_init()	do { } while (0)

#define PMD_SHIFT	(PLD_SHIFT + BITS_PER_PTE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#if PT_NLEVELS == 3
#define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#else
#define BITS_PER_PMD	0
#endif
#define PTRS_PER_PMD	(1UL << BITS_PER_PMD)

/* Definitions for 1st level */
#define PGDIR_SHIFT	(PMD_SHIFT + BITS_PER_PMD)
#define BITS_PER_PGD	(PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
#define PTRS_PER_PGD	(1UL << BITS_PER_PGD)
#define USER_PTRS_PER_PGD	PTRS_PER_PGD

#define MAX_ADDRBITS	(PGDIR_SHIFT + BITS_PER_PGD)
#define MAX_ADDRESS	(1UL << MAX_ADDRBITS)

#define SPACEID_SHIFT	(MAX_ADDRBITS - 32)

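/*
 * Worked example (illustrative only, not from the original source):
 * on a 64-bit kernel with 4kB pages, and assuming the entry sizes
 * from asm/page.h are 8-byte PTEs and 4-byte pmd/pgd entries
 * (BITS_PER_PTE_ENTRY == 3, BITS_PER_PMD_ENTRY == BITS_PER_PGD_ENTRY
 * == 2), the macros above work out to:
 *
 *	BITS_PER_PTE = 12 - 3     = 9	(512 PTEs per table page)
 *	PMD_SHIFT    = 12 + 9     = 21	(a PTE page maps 2MB)
 *	BITS_PER_PMD = 12 + 1 - 2 = 11	(2048 pmd entries)
 *	PGDIR_SHIFT  = 21 + 11    = 32	(a pmd maps 4GB)
 *	BITS_PER_PGD = 12 + 1 - 2 = 11	(2048 pgd entries)
 *	MAX_ADDRBITS = 32 + 11    = 43	(8TB of virtual space)
 */
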
/* This calculates the number of initial pages we need for the initial
 * page tables */
#if (KERNEL_INITIAL_ORDER) >= (PMD_SHIFT)
# define PT_INITIAL	(1 << (KERNEL_INITIAL_ORDER - PMD_SHIFT))
#else
# define PT_INITIAL	(1)	/* all initial PTEs fit into one page */
#endif
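
/*
 * Continuing the worked example above: with KERNEL_INITIAL_ORDER == 24
 * (16MB mapped at boot) and PMD_SHIFT == 21 (2MB per PTE page),
 * PT_INITIAL comes to 1 << 3 == 8 initial page-table pages.
 */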

/*
 * pgd entries used up by user/kernel:
 */

#define FIRST_USER_ADDRESS	0

/* NB: The tlb miss handlers make certain assumptions about the order */
/*     of the following bits, so be careful (One example, bits 25-31  */
/*     are moved together in one instruction).                        */

#define _PAGE_READ_BIT     31   /* (0x001) read access allowed */
#define _PAGE_WRITE_BIT    30   /* (0x002) write access allowed */
#define _PAGE_EXEC_BIT     29   /* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT  28   /* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT      27   /* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT    26   /* (0x020) Page Dirty (D bit) */
#define _PAGE_FILE_BIT     _PAGE_DIRTY_BIT /* overload this bit */
#define _PAGE_REFTRAP_BIT  25   /* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24   /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23   /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT  22   /* (0x200) Software: translation valid */
#define _PAGE_FLUSH_BIT    21   /* (0x400) Software: translation valid */
				/*         for cache flushing only */
#define _PAGE_USER_BIT     20   /* (0x800) Software: User accessible page */

/* N.B. The bits are defined in terms of a 32 bit word above, so the */
/*      following macro is ok for both 32 and 64 bit.                */

#define xlate_pabit(x) (31 - x)

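/*
 * Example: PA-RISC numbers bits with 0 as the most significant bit of
 * the word, and xlate_pabit() converts that to a conventional
 * left-shift count, e.g.:
 *	_PAGE_READ = 1 << xlate_pabit(31) = 1 << 0  = 0x001
 *	_PAGE_USER = 1 << xlate_pabit(20) = 1 << 11 = 0x800
 * matching the hex values noted in the bit definitions above.
 */
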
/* this defines the shift to the usable bits in the PTE; it is set so
 * that the valid bits _PAGE_PRESENT_BIT and _PAGE_USER_BIT are set
 * to zero */
#define PTE_SHIFT	xlate_pabit(_PAGE_USER_BIT)

/* PFN_PTE_SHIFT defines the shift of a PTE value to access the PFN field */
#define PFN_PTE_SHIFT	12


/* this is how many bits may be used by the file functions */
#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_SHIFT)

#define pte_to_pgoff(pte) (pte_val(pte) >> PTE_SHIFT)
#define pgoff_to_pte(off) ((pte_t) { ((off) << PTE_SHIFT) | _PAGE_FILE })
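
/*
 * Illustration (a reading of the macros above, not authoritative):
 * PTE_SHIFT == xlate_pabit(20) == 11, so a nonlinear file pte keeps
 * the page offset in bits 11 and up; of the low bits only _PAGE_FILE
 * is set, and _PAGE_PRESENT stays clear, so the fault handler sees a
 * file pte rather than a live mapping.
 */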

#define _PAGE_READ	(1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE	(1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW	(_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC	(1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY	(1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB	(1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY	(1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP	(1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE	(1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED	(1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT	(1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_FLUSH	(1 << xlate_pabit(_PAGE_FLUSH_BIT))
#define _PAGE_USER	(1 << xlate_pabit(_PAGE_USER_BIT))
#define _PAGE_FILE	(1 << xlate_pabit(_PAGE_FILE_BIT))

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL	(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)

/* The pgd/pmd contains a ptr (in phys addr space); since all pgds/pmds
 * are page-aligned, we don't care about the PAGE_OFFSET bits, except
 * for a few meta-information bits, so we shift the address to be
 * able to effectively address 40/42/44-bits of physical address space
 * depending on 4k/16k/64k PAGE_SIZE */
#define _PxD_PRESENT_BIT	31
#define _PxD_ATTACHED_BIT	30
#define _PxD_VALID_BIT		29

#define PxD_FLAG_PRESENT	(1 << xlate_pabit(_PxD_PRESENT_BIT))
#define PxD_FLAG_ATTACHED	(1 << xlate_pabit(_PxD_ATTACHED_BIT))
#define PxD_FLAG_VALID		(1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK		(0xf)
#define PxD_FLAG_SHIFT		(4)
#define PxD_VALUE_SHIFT		(8)	/* (PAGE_SHIFT-PxD_FLAG_SHIFT) */

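/*
 * Round-trip sketch of the encoding above: a pgd/pmd entry stores
 * (physaddr >> PxD_VALUE_SHIFT) | flags, with the flags confined to
 * the low PxD_FLAG_SHIFT bits.  pmd_address() below reverses it:
 *	(pmd_val & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT
 * so a 32-bit entry can name 32 + 8 = 40 bits of physical address
 * with 4kB pages, matching the 40/42/44-bit comment above.
 */
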
#ifndef __ASSEMBLY__

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
/* Others seem to make this executable, I don't know if that's correct
   or not.  The stack is mapped this way though so this is necessary
   in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_WRITEONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)
#define PAGE_EXECREAD	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC | _PAGE_ACCESSED)
#define PAGE_COPY	PAGE_EXECREAD
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY | _PAGE_READ)
#define PAGE_FLUSH	__pgprot(_PAGE_FLUSH)


/*
 * We could have an execute only page using "gateway - promote to priv
 * level 3", but that is kind of silly. So, the way things are defined
 * now, we must always have read permission for pages with execute
 * permission. For the fun of it we'll go ahead and support write only
 * pages.
 */

	 /*xwr*/
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  __P000 /* copy on write */
#define __P011  __P001 /* copy on write */
#define __P100  PAGE_EXECREAD
#define __P101  PAGE_EXECREAD
#define __P110  __P100 /* copy on write */
#define __P111  __P101 /* copy on write */

#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_WRITEONLY
#define __S011  PAGE_SHARED
#define __S100  PAGE_EXECREAD
#define __S101  PAGE_EXECREAD
#define __S110  PAGE_RWX
#define __S111  PAGE_RWX

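/*
 * Indexing example: the tables are indexed by the xwr bits of the
 * mmap() protection.  A MAP_SHARED PROT_READ|PROT_WRITE mapping uses
 * __S011 == PAGE_SHARED, while the MAP_PRIVATE equivalent uses
 * __P011 == PAGE_READONLY: the first write then faults and is
 * resolved by copy-on-write, as the comments above indicate.
 */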

extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */

/* initial page tables for 0-8MB for kernel */

extern pte_t pg0[];

/* zero page used for uninitialized stuff */

extern unsigned long *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

#define pte_none(x)	((pte_val(x) == 0) || (pte_val(x) & _PAGE_FLUSH))
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, xp)	do { pte_val(*(xp)) = 0; } while (0)

#define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x)	((unsigned long)(pmd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
#define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
#define pgd_address(x)	((unsigned long)(pgd_val(x) & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT)

#if PT_NLEVELS == 3
/* The first entry of the permanent pmd is not there if it contains
 * the gateway marker */
#define pmd_none(x)	(!pmd_val(x) || pmd_flag(x) == PxD_FLAG_ATTACHED)
#else
#define pmd_none(x)	(!pmd_val(x))
#endif
#define pmd_bad(x)	(!(pmd_flag(x) & PxD_FLAG_VALID))
#define pmd_present(x)	(pmd_flag(x) & PxD_FLAG_PRESENT)
static inline void pmd_clear(pmd_t *pmd) {
#if PT_NLEVELS == 3
	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
		/* This is the entry pointing to the permanent pmd
		 * attached to the pgd; cannot clear it */
		__pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
	else
#endif
		__pmd_val_set(*pmd, 0);
}


#if PT_NLEVELS == 3
#define pgd_page_vaddr(pgd)	((unsigned long) __va(pgd_address(pgd)))
#define pgd_page(pgd)		virt_to_page((void *)pgd_page_vaddr(pgd))

/* For 64 bit we have three level tables */

#define pgd_none(x)	(!pgd_val(x))
#define pgd_bad(x)	(!(pgd_flag(x) & PxD_FLAG_VALID))
#define pgd_present(x)	(pgd_flag(x) & PxD_FLAG_PRESENT)
static inline void pgd_clear(pgd_t *pgd) {
#if PT_NLEVELS == 3
	if (pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
		/* This is the permanent pmd attached to the pgd; cannot
		 * free it */
		return;
#endif
	__pgd_val_set(*pgd, 0);
}
#else
/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
static inline void pgd_clear(pgd_t *pgdp)	{ }
#endif

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)		{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_file(pte_t pte)		{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte)	{ return 0; }

static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~_PAGE_WRITE; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ pte_val(pte) |= _PAGE_WRITE; return pte; }
static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define __mk_pte(addr, pgprot)						\
({									\
	pte_t __pte;							\
									\
	pte_val(__pte) = ((((addr) >> PAGE_SHIFT) << PFN_PTE_SHIFT) + pgprot_val(pgprot)); \
									\
	__pte;								\
})

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	pte_t pte;
	pte_val(pte) = (pfn << PFN_PTE_SHIFT) | pgprot_val(pgprot);
	return pte;
}

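/*
 * Worked example (illustrative): with PFN_PTE_SHIFT == 12, pfn 0x1234
 * and PAGE_KERNEL give pte_val == (0x1234 << 12) | _PAGE_KERNEL, and
 * pte_pfn() below recovers 0x1234 by shifting the value back down.
 */
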
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }

/* Permanent address of a page.  On parisc we don't have highmem. */

#define pte_pfn(x)	(pte_val(x) >> PFN_PTE_SHIFT)

#define pte_page(pte)	(pfn_to_page(pte_pfn(pte)))

#define pmd_page_vaddr(pmd)	((unsigned long) __va(pmd_address(pmd)))

#define __pmd_page(pmd)	((unsigned long) __va(pmd_address(pmd)))
#define pmd_page(pmd)	virt_to_page((void *)__pmd_page(pmd))

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) \
	((mm)->pgd + ((address) >> PGDIR_SHIFT))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */

#if PT_NLEVELS == 3
#define pmd_offset(dir, address) \
	((pmd_t *) pgd_page_vaddr(*(dir)) + (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)))
#else
#define pmd_offset(dir, addr) ((pmd_t *) dir)
#endif

/* Find an entry in the third-level page table.. */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
	((pte_t *) pmd_page_vaddr(*(pmd)) + pte_index(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
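
/*
 * Putting the accessors together: a minimal sketch (hypothetical
 * helper, not part of this header) of a software walk down the
 * levels; real callers must also hold the appropriate page-table
 * locks.
 */
static inline pte_t *__example_lookup_pte(struct mm_struct *mm,
					  unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* first level */
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, addr);		/* second level (or folded) */
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* third level */
}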
extern void paging_init (void);

/* Used for deferring calls to flush_dcache_page() */

#define PG_dcache_dirty         PG_arch_1

extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

/* Encode and de-code a swap entry */

#define __swp_type(x)			((x).val & 0x1f)
#define __swp_offset(x)			( (((x).val >> 6) &  0x7) | \
					  (((x).val >> 8) & ~0x7) )
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | \
					    ((offset &  0x7) << 6) | \
					    ((offset & ~0x7) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

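/*
 * Resulting swap-pte layout (as read from the masks above):
 *	bits  0..4	swap type (5 bits)
 *	bit   5		skipped (_PAGE_FILE/_PAGE_DIRTY)
 *	bits  6..8	offset bits 0..2
 *	bits  9..10	skipped (_PAGE_PRESENT, _PAGE_FLUSH)
 *	bits 11..	offset bits 3..
 * i.e. the bits that distinguish present, file and flush-only ptes
 * stay clear in a swap entry.
 */
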
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), &pte_val(*ptep));
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
#endif
}

extern spinlock_t pa_dbit_lock;

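/*
 * Note (editorial reading, not from the original source): instead of
 * zeroing the pte, ptep_get_and_clear() below leaves _PAGE_FLUSH set.
 * pte_none() above treats such a pte as empty, but a flush-only
 * translation remains available for flushing the virtually-indexed
 * caches after the mapping is logically gone.
 */
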
struct mm_struct;
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t old_pte;
	pte_t pte;

	spin_lock(&pa_dbit_lock);
	pte = old_pte = *ptep;
	pte_val(pte) &= ~_PAGE_PRESENT;
	pte_val(pte) |= _PAGE_FLUSH;
	set_pte_at(mm, addr, ptep, pte);
	spin_unlock(&pa_dbit_lock);

	return old_pte;
}

static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte(old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;
	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}

#define pte_same(A, B)	(pte_val(A) == pte_val(B))

#endif /* !__ASSEMBLY__ */


/* TLB page size encoding - see table 3-1 in parisc20.pdf */
#define _PAGE_SIZE_ENCODING_4K		0
#define _PAGE_SIZE_ENCODING_16K		1
#define _PAGE_SIZE_ENCODING_64K		2
#define _PAGE_SIZE_ENCODING_256K	3
#define _PAGE_SIZE_ENCODING_1M		4
#define _PAGE_SIZE_ENCODING_4M		5
#define _PAGE_SIZE_ENCODING_16M		6
#define _PAGE_SIZE_ENCODING_64M		7

#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4K
#elif defined(CONFIG_PARISC_PAGE_SIZE_16KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16K
#elif defined(CONFIG_PARISC_PAGE_SIZE_64KB)
# define _PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_64K
#endif


#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) | _PAGE_NO_CACHE)

/* We provide our own get_unmapped_area to provide cache coherency */

#define HAVE_ARCH_UNMAPPED_AREA

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* _PARISC_PGTABLE_H */