Diffstat (limited to 'include/asm-mn10300/pgtable.h')
-rw-r--r--	include/asm-mn10300/pgtable.h	492
1 file changed, 0 insertions(+), 492 deletions(-)
diff --git a/include/asm-mn10300/pgtable.h b/include/asm-mn10300/pgtable.h
deleted file mode 100644
index 6dc30fc827c4..000000000000
--- a/include/asm-mn10300/pgtable.h
+++ /dev/null
@@ -1,492 +0,0 @@
/* MN10300 Page table manipulators and constants
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 *
 * The Linux memory management assumes a three-level page table setup. On
 * the i386, we use that, but "fold" the mid level into the top-level page
 * table, so that we physically have the same two-level page table as the
 * i386 mmu expects.
 *
 * This file contains the functions and defines necessary to modify and use
 * the i386 page table tree for the purposes of the MN10300 TLB handler
 * functions.
 */
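/* Illustrative sketch, not from the original header: with the mid level
 * folded away, a software walk of these tables needs only two loads.
 * pgd_offset() and pte_offset_kernel() below reduce to roughly:
 *
 *	pgd_t *pgd = mm->pgd + (addr >> PGDIR_SHIFT);
 *	pte_t *pte = (pte_t *) __va(pgd_val(*pgd) & PAGE_MASK) +
 *		     ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 */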
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <asm/cpu-regs.h>

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <asm/cache.h>
#include <linux/threads.h>

#include <asm/bitops.h>

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern unsigned long empty_zero_page[1024];
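/* Minimal usage sketch (illustrative, not part of this header): a read
 * fault on an anonymous area can be satisfied by mapping the shared zero
 * page read-only, using mk_pte() and set_pte_at() defined below:
 *
 *	pte_t zero = mk_pte(ZERO_PAGE(addr), PAGE_READONLY);
 *	set_pte_at(mm, addr, ptep, zero);
 */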
extern spinlock_t pgd_lock;
extern struct page *pgd_list;

extern void pmd_ctor(void *, struct kmem_cache *, unsigned long);
extern void pgtable_cache_init(void);
extern void paging_init(void);

#endif /* !__ASSEMBLY__ */

/*
 * The Linux mn10300 paging architecture only implements the traditional
 * 2-level page tables
 */
#define PGDIR_SHIFT		22
#define PTRS_PER_PGD		1024
#define PTRS_PER_PUD		1	/* we don't really have any PUD physically */
#define PTRS_PER_PMD		1	/* we don't really have any PMD physically */
#define PTRS_PER_PTE		1024

#define PGD_SIZE		PAGE_SIZE
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE - 1))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0

#define USER_PGD_PTRS		(PAGE_OFFSET >> PGDIR_SHIFT)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - USER_PGD_PTRS)

#define TWOLEVEL_PGDIR_SHIFT	22
#define BOOT_USER_PGD_PTRS	(__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
#define BOOT_KERNEL_PGD_PTRS	(1024 - BOOT_USER_PGD_PTRS)
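/* Worked example (illustrative): with PGDIR_SHIFT 22 and 4kB pages, a
 * 32-bit virtual address splits 10:10:12 across the two levels:
 *
 *	addr        = 0xC0012345
 *	pgd index   = addr >> 22           = 0x300
 *	pte index   = (addr >> 12) & 0x3ff = 0x012
 *	byte offset = addr & 0xfff         = 0x345
 *
 * so each of the 1024 pgd entries covers one 4MB PGDIR_SIZE unit.
 */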

#ifndef __ASSEMBLY__
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#endif

/*
 * Unfortunately, due to the way the MMU works on the MN10300, the vmalloc VM
 * area has to be in the lower half of the virtual address range (the upper
 * half is not translated through the TLB).
 *
 * So in this case, the vmalloc area goes at the bottom of the address map
 * (leaving a hole at the very bottom to catch addressing errors), and
 * userspace starts immediately above.
 *
 * The vmalloc() routines also leave a hole of 4kB between each vmalloced
 * area to catch addressing errors.
 */
#define VMALLOC_OFFSET	(8 * 1024 * 1024)
#define VMALLOC_START	(0x70000000)
#define VMALLOC_END	(0x7C000000)

#ifndef __ASSEMBLY__
extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
#endif
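/* Worked numbers (illustrative): the window above spans
 * 0x7C000000 - 0x70000000 = 0x0C000000 = 192MB, so kernel_vmalloc_ptes
 * holds (VMALLOC_END - VMALLOC_START) / PAGE_SIZE = 49152 entries with
 * 4kB pages.
 */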

/* IPTEL/DPTEL bit assignments */
#define _PAGE_BIT_VALID		xPTEL_V_BIT
#define _PAGE_BIT_ACCESSED	xPTEL_UNUSED1_BIT	/* mustn't be loaded into IPTEL/DPTEL */
#define _PAGE_BIT_NX		xPTEL_UNUSED2_BIT	/* mustn't be loaded into IPTEL/DPTEL */
#define _PAGE_BIT_CACHE		xPTEL_C_BIT
#define _PAGE_BIT_PRESENT	xPTEL_PV_BIT
#define _PAGE_BIT_DIRTY		xPTEL_D_BIT
#define _PAGE_BIT_GLOBAL	xPTEL_G_BIT

#define _PAGE_VALID		xPTEL_V
#define _PAGE_ACCESSED		xPTEL_UNUSED1
#define _PAGE_NX		xPTEL_UNUSED2	/* no-execute bit */
#define _PAGE_CACHE		xPTEL_C
#define _PAGE_PRESENT		xPTEL_PV
#define _PAGE_DIRTY		xPTEL_D
#define _PAGE_PROT		xPTEL_PR
#define _PAGE_PROT_RKNU		xPTEL_PR_ROK
#define _PAGE_PROT_WKNU		xPTEL_PR_RWK
#define _PAGE_PROT_RKRU		xPTEL_PR_ROK_ROU
#define _PAGE_PROT_WKRU		xPTEL_PR_RWK_ROU
#define _PAGE_PROT_WKWU		xPTEL_PR_RWK_RWU
#define _PAGE_GLOBAL		xPTEL_G
#define _PAGE_PSE		xPTEL_PS_4Mb	/* 4MB page */

#define _PAGE_FILE		xPTEL_UNUSED1_BIT	/* set:pagecache unset:swap */

#define __PAGE_PROT_UWAUX	0x040
#define __PAGE_PROT_USER	0x080
#define __PAGE_PROT_WRITE	0x100

#define _PAGE_PRESENTV		(_PAGE_PRESENT|_PAGE_VALID)
#define _PAGE_PROTNONE		0x000	/* If not present */

#ifndef __ASSEMBLY__

#define VMALLOC_VMADDR(x)	((unsigned long)(x))

#define _PAGE_TABLE	(_PAGE_PRESENTV | _PAGE_PROT_WKNU | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define __PAGE_NONE	(_PAGE_PRESENTV | _PAGE_PROT_RKNU | _PAGE_ACCESSED | _PAGE_CACHE)
#define __PAGE_SHARED	(_PAGE_PRESENTV | _PAGE_PROT_WKWU | _PAGE_ACCESSED | _PAGE_CACHE)
#define __PAGE_COPY	(_PAGE_PRESENTV | _PAGE_PROT_RKRU | _PAGE_ACCESSED | _PAGE_CACHE)
#define __PAGE_READONLY	(_PAGE_PRESENTV | _PAGE_PROT_RKRU | _PAGE_ACCESSED | _PAGE_CACHE)

#define PAGE_NONE		__pgprot(__PAGE_NONE | _PAGE_NX)
#define PAGE_SHARED_NOEXEC	__pgprot(__PAGE_SHARED | _PAGE_NX)
#define PAGE_COPY_NOEXEC	__pgprot(__PAGE_COPY | _PAGE_NX)
#define PAGE_READONLY_NOEXEC	__pgprot(__PAGE_READONLY | _PAGE_NX)
#define PAGE_SHARED_EXEC	__pgprot(__PAGE_SHARED)
#define PAGE_COPY_EXEC		__pgprot(__PAGE_COPY)
#define PAGE_READONLY_EXEC	__pgprot(__PAGE_READONLY)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		PAGE_READONLY_NOEXEC
#define PAGE_SHARED		PAGE_SHARED_EXEC

#define __PAGE_KERNEL_BASE	(_PAGE_PRESENTV | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)

#define __PAGE_KERNEL		(__PAGE_KERNEL_BASE | _PAGE_PROT_WKNU | _PAGE_CACHE | _PAGE_NX)
#define __PAGE_KERNEL_NOCACHE	(__PAGE_KERNEL_BASE | _PAGE_PROT_WKNU | _PAGE_NX)
#define __PAGE_KERNEL_EXEC	(__PAGE_KERNEL & ~_PAGE_NX)
#define __PAGE_KERNEL_RO	(__PAGE_KERNEL_BASE | _PAGE_PROT_RKNU | _PAGE_CACHE | _PAGE_NX)
#define __PAGE_KERNEL_LARGE	(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL		__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_NOCACHE	__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_LARGE	__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_EXEC	__pgprot(__PAGE_KERNEL_LARGE_EXEC)

/*
 * Whilst the MN10300 can do page protection for execute (given separate data
 * and insn TLBs), we are not supporting it at the moment. Write permission,
 * however, always implies read permission (but not execute permission).
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY_NOEXEC
#define __P010	PAGE_COPY_NOEXEC
#define __P011	PAGE_COPY_NOEXEC
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY_NOEXEC
#define __S010	PAGE_SHARED_NOEXEC
#define __S011	PAGE_SHARED_NOEXEC
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
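/* Example reading of the tables above (illustrative; protection_map is
 * the generic mm array these entries populate): the index is built from
 * the VM_READ, VM_WRITE, VM_EXEC and VM_SHARED flags of a mapping, so a
 * PROT_READ|PROT_WRITE private mapping resolves to __P011, i.e.
 * PAGE_COPY_NOEXEC (write faults trigger COW), while the same mapping
 * with MAP_SHARED resolves to __S011, i.e. PAGE_SHARED_NOEXEC:
 *
 *	pgprot_t prot = protection_map[vm_flags & 0x0f];
 */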

/*
 * Define this to warn about kernel memory accesses that are
 * done without a 'verify_area(VERIFY_WRITE,..)'
 */
#undef TEST_VERIFY_AREA

#define pte_present(x)	(pte_val(x) & _PAGE_VALID)
#define pte_clear(mm, addr, xp) \
do { \
	set_pte_at((mm), (addr), (xp), __pte(0)); \
} while (0)

#define pmd_none(x)	(!pmd_val(x))
#define pmd_present(x)	(!pmd_none(x))
#define pmd_clear(xp)	do { set_pmd(xp, __pmd(0)); } while (0)
#define pmd_bad(x)	0


#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))

#ifndef __ASSEMBLY__

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_user(pte_t pte)	{ return pte_val(pte) & __PAGE_PROT_USER; }
static inline int pte_read(pte_t pte)	{ return pte_val(pte) & __PAGE_PROT_USER; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & __PAGE_PROT_WRITE; }
static inline int pte_special(pte_t pte) { return 0; }

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }

static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~(__PAGE_PROT_USER|__PAGE_PROT_UWAUX);
	return pte;
}
static inline pte_t pte_exprotect(pte_t pte)
{
	pte_val(pte) |= _PAGE_NX;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(__PAGE_PROT_WRITE|__PAGE_PROT_UWAUX);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
static inline pte_t pte_mkexec(pte_t pte)	{ pte_val(pte) &= ~_PAGE_NX; return pte; }

static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= __PAGE_PROT_USER;
	if (pte_write(pte))
		pte_val(pte) |= __PAGE_PROT_UWAUX;
	return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= __PAGE_PROT_WRITE;
	if (pte_val(pte) & __PAGE_PROT_USER)
		pte_val(pte) |= __PAGE_PROT_UWAUX;
	return pte;
}
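/* Typical modifier usage (illustrative sketch): the helpers above are
 * pure value transformations, so a fault handler upgrading a pte for a
 * write would do something like:
 *
 *	pte_t entry = *ptep;
 *	entry = pte_mkyoung(pte_mkdirty(pte_mkwrite(entry)));
 *	set_pte_at(vma->vm_mm, addr, ptep, entry);
 *	update_mmu_cache(vma, addr, entry);
 */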

static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }

#define pte_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
	       __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
	       __FILE__, __LINE__, pgd_val(e))

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_clear(xp)	do { } while (0)

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)			(*(pteptr) = pteval)
#define set_pte_at(mm, addr, ptep, pteval)	set_pte((ptep), (pteval))
#define set_pte_atomic(pteptr, pteval)		set_pte((pteptr), (pteval))
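/* Usage sketch (illustrative): since the TLB handlers reload entries
 * from these tables (see update_mmu_cache() below), installing a kernel
 * mapping is just a store through the hook:
 *
 *	set_pte_at(&init_mm, vaddr, ptep, pfn_pte(pfn, PAGE_KERNEL));
 */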

/*
 * (pmds are folded into pgds so this doesn't get actually called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

#define ptep_get_and_clear(mm, addr, ptep) \
	__pte(xchg(&(ptep)->pte, 0))
#define pte_same(a, b)		(pte_val(a) == pte_val(b))
#define pte_page(x)		pfn_to_page(pte_pfn(x))
#define pte_none(x)		(!pte_val(x))
#define pte_pfn(x)		((unsigned long) (pte_val(x) >> PAGE_SHIFT))
#define __pfn_addr(pfn)		((pfn) << PAGE_SHIFT)
#define pfn_pte(pfn, prot)	__pte(__pfn_addr(pfn) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(__pfn_addr(pfn) | pgprot_val(prot))
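/* Round-trip sketch (illustrative): the protection bits all sit below
 * PAGE_SHIFT, so a pfn survives encoding unchanged:
 *
 *	pte_t pte = pfn_pte(0x1234, PAGE_KERNEL);	-- val 0x1234000 | prot
 *	pte_pfn(pte) == 0x1234;				-- prot bits shifted out
 */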

/*
 * All present user pages are user-executable:
 */
static inline int pte_exec(pte_t pte)
{
	return pte_user(pte);
}

/*
 * All present pages are kernel-executable:
 */
static inline int pte_exec_kernel(pte_t pte)
{
	return 1;
}

/*
 * Bits 0 and 1 are taken, split up the 29 bits of offset
 * into this range:
 */
#define PTE_FILE_MAX_BITS	29

#define pte_to_pgoff(pte)	(pte_val(pte) >> 2)
#define pgoff_to_pte(off)	__pte((off) << 2 | _PAGE_FILE)
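/* Worked example (illustrative): a page-cache offset lives above the two
 * reserved low bits, so encode/decode is a 2-bit shift each way:
 *
 *	pgoff_to_pte(0x12345)	-- pte value (0x12345 << 2) | _PAGE_FILE
 *	pte_to_pgoff(pte)	-- (pte value) >> 2 = 0x12345 again
 */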

/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val >> 2) & 0x3f)
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 2) | ((offset) << 8) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		__pte((x).val)
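/* Worked layout (illustrative): the swap type occupies bits 2..7 and the
 * offset bits 8 and up, keeping the low valid bits clear so a swap pte
 * never looks pte_present():
 *
 *	swp_entry_t e = __swp_entry(5, 0x1000);	-- e.val == 0x100014
 *	__swp_type(e) == 5, __swp_offset(e) == 0x1000
 */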

static inline
int ptep_test_and_clear_dirty(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *ptep)
{
	if (!pte_dirty(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_BIT_DIRTY, &ptep->pte);
}

static inline
int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
}

static inline
void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) &= ~(__PAGE_PROT_WRITE|__PAGE_PROT_UWAUX);
}

static inline void ptep_mkdirty(pte_t *ptep)
{
	set_bit(_PAGE_BIT_DIRTY, &ptep->pte);
}

/*
 * Macro to mark a page protection value as "uncacheable". On processors which
 * do not support it, this is a no-op.
 */
#define pgprot_noncached(prot)	__pgprot(pgprot_val(prot) & ~_PAGE_CACHE)


/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_huge(entry) \
	((entry).pte |= _PAGE_PRESENT | _PAGE_PSE | _PAGE_VALID)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

#define page_pte(page)	page_pte_prot((page), __pgprot(0))

#define pmd_page_kernel(pmd) \
	((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

#define pmd_page(pmd)	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

#define pmd_large(pmd) \
	((pmd_val(pmd) & (_PAGE_PSE | _PAGE_PRESENT)) == \
	 (_PAGE_PSE | _PAGE_PRESENT))

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
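/* e.g. (illustrative): pgd_index(VMALLOC_START) = 0x70000000 >> 22 = 0x1c0
 * and 0x7C000000 >> 22 = 0x1f0, so the vmalloc area defined earlier is
 * covered by pgd entries 0x1c0 to 0x1ef.
 */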

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's
 */
#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address)	pgd_offset(&init_mm, address)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
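/* Putting the pieces together (illustrative sketch): a full kernel-address
 * lookup with the macros above; because the mid levels are folded, the pgd
 * entry can be treated directly as the pmd entry:
 *
 *	pgd_t *pgd = pgd_offset_k(vaddr);
 *	pte_t *pte = pte_offset_kernel((pmd_t *) pgd, vaddr);
 *	if (pte_present(*pte))
 *		pfn = pte_pfn(*pte);
 */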

/*
 * Make a given kernel text page executable/non-executable.
 * Returns the previous executability setting of that page (which
 * is used to restore the previous state). Used by the SMP bootup code.
 * NOTE: this is an __init function for security reasons.
 */
static inline int set_kernel_exec(unsigned long vaddr, int enable)
{
	return 0;
}

#define pte_offset_map(dir, address) \
	((pte_t *) page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address)	pte_offset_map(dir, address)
#define pte_unmap(pte)		do {} while (0)
#define pte_unmap_nested(pte)	do {} while (0)

/*
 * The MN10300 has external MMU info in the form of a TLB: this is adapted from
 * the kernel page tables containing the necessary information by tlb-mn10300.S
 */
extern void update_mmu_cache(struct vm_area_struct *vma,
			     unsigned long address, pte_t pte);

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range((vma), (vaddr), (pfn), (size), (prot))

#define MK_IOSPACE_PFN(space, pfn)	(pfn)
#define GET_IOSPACE(pfn)		0
#define GET_PFN(pfn)			(pfn)

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTEP_MKDIRTY
#define __HAVE_ARCH_PTE_SAME
#include <asm-generic/pgtable.h>

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_PGTABLE_H */