Diffstat (limited to 'include/asm-sh/pgtable_64.h')
-rw-r--r-- | include/asm-sh/pgtable_64.h | 299
1 file changed, 299 insertions, 0 deletions
diff --git a/include/asm-sh/pgtable_64.h b/include/asm-sh/pgtable_64.h
new file mode 100644
index 000000000000..972211671c9a
--- /dev/null
+++ b/include/asm-sh/pgtable_64.h
@@ -0,0 +1,299 @@
#ifndef __ASM_SH_PGTABLE_64_H
#define __ASM_SH_PGTABLE_64_H

/*
 * include/asm-sh/pgtable_64.h
 *
 * This file contains the functions and defines necessary to modify and use
 * the SuperH page table tree.
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003, 2004 Paul Mundt
 * Copyright (C) 2003, 2004 Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>

/*
 * Error outputs.
 */
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * Table setting routines. Used within arch/mm only.
 */
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)

static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
	unsigned long long x = ((unsigned long long) pteval.pte_low);
	unsigned long long *xp = (unsigned long long *) pteptr;
	/*
	 * Sign-extend based on NPHYS.
	 */
	*(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
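/*
 * Worked illustration (not part of the original file): NPHYS_SIGN
 * presumably selects bit NPHYS-1 and NPHYS_MASK the bits above it.
 * If, hypothetically, NPHYS were 32, then NPHYS_SIGN would be bit 31,
 * so a PTE value of 0x80000123 would be stored sign-extended as
 * 0xffffffff80000123, while 0x40000123 would be stored unchanged.
 */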

static __inline__ void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long) ptep;
}

/*
 * PGD defines. Top level.
 */

/* To find an entry in a generic PGD. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address) pgd_index(address)
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/*
 * PMD level access routines. Same notes as above.
 */
#define _PMD_EMPTY	0x0
/* A PMD is either empty or present; it is never paged out. */
#define pmd_present(pmd_entry) (pmd_val(pmd_entry) & _PAGE_PRESENT)
#define pmd_clear(pmd_entry_p) (set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
#define pmd_none(pmd_entry) (pmd_val((pmd_entry)) == _PMD_EMPTY)
#define pmd_bad(pmd_entry) ((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_page_vaddr(pmd_entry) \
	((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))

#define pmd_page(pmd) \
	(virt_to_page(pmd_val(pmd)))

/* PMD to PTE dereferencing */
#define pte_index(address) \
	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_kernel(dir, addr) \
	((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))

#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
#define pte_offset_map_nested(dir,addr) pte_offset_kernel(dir, addr)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
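/*
 * Illustrative sketch (not in the original file): putting the accessors
 * above together, a walk from an mm and a virtual address down to the PTE
 * would look roughly like this, assuming the usual pmd_offset() helper is
 * provided by the rest of the pgtable headers.  For kernel addresses,
 * pgd_offset_k(addr) replaces the first step.
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pmd_t *pmd = pmd_offset(pgd, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */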

#ifndef __ASSEMBLY__
#define IOBASE_VADDR	0xff000000
#define IOBASE_END	0xffffffff

/*
 * PTEL coherent flags.
 * See Chapter 17, ST50 CPU Core Volume 1, Architecture.
 */
/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
   positions, to avoid expensive bit shuffling on every refill.  The
   remaining bits are used for s/w purposes and masked out on each refill.

   Note, the PTE slots are used to hold data of type swp_entry_t when a page
   is swapped out.  Only the _PAGE_PRESENT flag is significant when the page
   is swapped out, and it must be placed so that it doesn't overlap either
   the type or offset fields of swp_entry_t.  For x86, offset is at [31:8]
   and type at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and
   swp_entry_t.  This scheme doesn't map to SH-5 because bit [0] controls
   cacheability.  So bit [2] is used for _PAGE_PRESENT and the type field of
   swp_entry_t is split into 2 pieces.  That is handled by __swp_entry() and
   __swp_type() below. */
#define _PAGE_WT	0x001  /* CB0: if cacheable, 1->write-thru, 0->write-back */
#define _PAGE_DEVICE	0x001  /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
#define _PAGE_CACHABLE	0x002  /* CB1: uncachable/cachable */
#define _PAGE_PRESENT	0x004  /* software: page is present */
#define _PAGE_FILE	0x004  /* software: only when !present */
#define _PAGE_SIZE0	0x008  /* SZ0-bit : size of page */
#define _PAGE_SIZE1	0x010  /* SZ1-bit : size of page */
#define _PAGE_SHARED	0x020  /* software: reflects PTEH's SH */
#define _PAGE_READ	0x040  /* PR0-bit : read access allowed */
#define _PAGE_EXECUTE	0x080  /* PR1-bit : execute access allowed */
#define _PAGE_WRITE	0x100  /* PR2-bit : write access allowed */
#define _PAGE_USER	0x200  /* PR3-bit : user space access allowed */
#define _PAGE_DIRTY	0x400  /* software: page accessed in write */
#define _PAGE_ACCESSED	0x800  /* software: page referenced */

/* Mask which drops software flags */
#define _PAGE_FLAGS_HARDWARE_MASK	0xfffffffffffff3dbLL

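/*
 * For reference (derived from the values above): the low-order bits
 * cleared by _PAGE_FLAGS_HARDWARE_MASK are 0xc24, i.e. exactly the four
 * software flags _PAGE_PRESENT (0x004), _PAGE_SHARED (0x020),
 * _PAGE_DIRTY (0x400) and _PAGE_ACCESSED (0x800); everything the
 * hardware interprets is preserved.
 */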
/*
 * HugeTLB support
 */
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE	(_PAGE_SIZE0)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE1)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define _PAGE_SZHUGE	(_PAGE_SIZE0 | _PAGE_SIZE1)
#endif
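/*
 * Read together with the SZ0/SZ1 bit definitions above, the Kconfig
 * choices imply a page-size encoding of SZ1:SZ0 = 01 -> 64kB,
 * 10 -> 1MB, 11 -> 512MB (00 presumably being the base page size).
 */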

/*
 * Default flags for a Kernel page.
 * This is fundamentally also SHARED because the main use of this define
 * (other than for PGD/PMD entries) is for the VMALLOC pool which is
 * contextless.
 *
 * _PAGE_EXECUTE is required for modules.
 *
 */
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
			 _PAGE_EXECUTE | \
			 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SHARED)

/* Default flags for a User page */
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

/*
 * We have full permissions (Read/Write/Execute/Shared).
 */
#define _PAGE_COMMON	(_PAGE_PRESENT | _PAGE_USER | \
			 _PAGE_CACHABLE | _PAGE_ACCESSED)

#define PAGE_NONE	__pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_SHARED)
#define PAGE_EXECREAD	__pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_EXECUTE)

/*
 * We need to include _PAGE_EXECUTE in PAGE_COPY because it is the default
 * protection mode for the stack.
 */
#define PAGE_COPY	PAGE_EXECREAD

#define PAGE_READONLY	__pgprot(_PAGE_COMMON | _PAGE_READ)
#define PAGE_WRITEONLY	__pgprot(_PAGE_COMMON | _PAGE_WRITE)
#define PAGE_RWX	__pgprot(_PAGE_COMMON | _PAGE_READ | \
				 _PAGE_WRITE | _PAGE_EXECUTE)
#define PAGE_KERNEL	__pgprot(_KERNPG_TABLE)

/* Make it a device mapping for maximum safety (e.g. for mapping device
   registers into user-space via /dev/map). */
#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
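/*
 * Hypothetical usage sketch (not in the original file): a driver mmap()
 * handler mapping device registers into user space would typically do
 * something like:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */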

/*
 * Handling allocation failures during page table setup.
 */
extern void __handle_bad_pmd_kernel(pmd_t *pmd);
#define __handle_bad_pmd(x)	__handle_bad_pmd_kernel(x)

/*
 * PTE level access routines.
 *
 * Note 1:
 * This is the leaf of the page table walk: what is stored here is a
 * physical address.
 *
 * Note 2:
 * Regarding the choice of _PTE_EMPTY:
 *
 * We must choose a bit pattern that cannot be valid, whether or not the
 * page is present.  bit[2]==1 => present, bit[2]==0 => swapped out.  If
 * swapped out, bits [31:8], [6:3], [1:0] are under swapper control, so
 * only bit[7] is left for us to select.  If we force bit[7]==0 when swapped
 * out, we could use the combination bit[7,2]=2'b10 to indicate an empty
 * PTE.  Alternatively, if we force bit[7]==1 when swapped out, we can use
 * all zeroes to indicate empty.  This is convenient, because the page
 * tables get cleared to zero when they are allocated.
 */
#define _PTE_EMPTY	0x0
#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp)	(set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
#define pte_none(x)	(pte_val(x) == _PTE_EMPTY)

/*
 * Some definitions to translate between mem_map, PTEs, and page
 * addresses:
 */

/*
 * Given a PTE, return the index of the mem_map[] entry corresponding
 * to the page frame the PTE refers to: take the absolute physical
 * address, make it relative to __MEMORY_START, and translate that
 * into an index.
 */
#define pte_pagenr(x)	(((unsigned long) (pte_val(x)) - \
			 __MEMORY_START) >> PAGE_SHIFT)

/*
 * Given a PTE, return the "struct page *".
 */
#define pte_page(x)	(mem_map + pte_pagenr(x))

/*
 * Return number of (down rounded) MB corresponding to x pages.
 */
#define pages_to_mb(x)	((x) >> (20-PAGE_SHIFT))
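/*
 * Worked example (illustrative): with 4kB pages (PAGE_SHIFT == 12) this
 * is a right shift by 8, so 2560 pages -> 10MB, and 2559 pages rounds
 * down to 9MB.
 */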


/*
 * The following only have defined behavior if pte_present() is true.
 */
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }

static inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
static inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }

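/*
 * Illustrative only (not in the original file): these helpers take a PTE
 * value and return a modified copy, so they compose naturally; e.g. a
 * fault handler marking a page referenced and dirty might do:
 *
 *	entry = pte_mkyoung(pte_mkdirty(entry));
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 */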

/*
 * Conversion functions: convert a page and protection to a page entry.
 *
 * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
 */
#define mk_pte(page,pgprot)						\
({									\
	pte_t __pte;							\
									\
	set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) |	\
		__MEMORY_START | pgprot_val((pgprot))));		\
	__pte;								\
})
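/*
 * Illustrative note (not in the original file): for the page at mem_map
 * index n, mk_pte() stores (n << PAGE_SHIFT) + __MEMORY_START together
 * with the protection bits, i.e. the page's physical address.  Assuming
 * __MEMORY_START is page-aligned and all protection bits lie below
 * PAGE_SHIFT, pte_pagenr() above inverts this exactly, so
 * pte_page(mk_pte(page, prot)) == page.
 */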

/*
 * This takes an (absolute) physical page address that is used
 * by the remapping functions.
 */
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }

/* Encode and decode a swap entry */
#define __swp_type(x)			(((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x)			((x).val >> 8)
#define __swp_entry(type, offset)	((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
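/*
 * Worked example (illustrative): the swap type is split around bit 2 so
 * that _PAGE_PRESENT stays clear in a swapped-out PTE.  Encoding
 * type = 0x13, offset = 0x1000 gives
 *
 *	val = (0x1000 << 8) + ((0x13 & 0x3c) << 1) + (0x13 & 3) = 0x100023
 *
 * Bit 2 of 0x100023 is zero, so pte_present() is false, and decoding
 * recovers the fields: __swp_type() = 3 + 0x10 = 0x13, and
 * __swp_offset() = 0x100023 >> 8 = 0x1000.
 */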

/* Encode and decode a nonlinear file mapping entry */
#define PTE_FILE_MAX_BITS	29
#define pte_to_pgoff(pte)	(pte_val(pte))
#define pgoff_to_pte(off)	((pte_t) { (off) | _PAGE_FILE })

#endif /* !__ASSEMBLY__ */

#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

#endif /* __ASM_SH_PGTABLE_64_H */