Diffstat (limited to 'include/asm-xtensa/pgtable.h')
-rw-r--r--   include/asm-xtensa/pgtable.h   235
1 file changed, 116 insertions, 119 deletions
diff --git a/include/asm-xtensa/pgtable.h b/include/asm-xtensa/pgtable.h
index 06850f3b26a7..c0fcc1c9660c 100644
--- a/include/asm-xtensa/pgtable.h
+++ b/include/asm-xtensa/pgtable.h
@@ -1,11 +1,11 @@
 /*
- * linux/include/asm-xtensa/pgtable.h
+ * include/asm-xtensa/pgtable.h
  *
  * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version2 as
+ * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2001 - 2007 Tensilica Inc.
  */
 
 #ifndef _XTENSA_PGTABLE_H
@@ -23,7 +23,7 @@
 
 /*
  * The Xtensa architecture port of Linux has a two-level page table system,
- * i.e. the logical three-level Linux page table layout are folded.
+ * i.e. the logical three-level Linux page table layout is folded.
  * Each task has the following memory page tables:
  *
  * PGD table (page directory), ie. 3rd-level page table:
@@ -43,6 +43,7 @@
  *
  * The individual pages are 4 kB big with special pages for the empty_zero_page.
  */
+
 #define PGDIR_SHIFT	22
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
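With PGDIR_SHIFT = 22 and 4 kB pages, a 32-bit virtual address splits into a 10-bit PGD index, a 10-bit PTE index, and a 12-bit page offset. A minimal standalone sketch of that decomposition (illustrative only; the harness below is not part of the header):

#include <stdint.h>
#include <stdio.h>

#define PGDIR_SHIFT   22
#define PAGE_SHIFT    12
#define PTRS_PER_PTE  1024

int main(void)
{
	uint32_t va = 0x12345678;

	/* Top 10 bits select the PGD slot, middle 10 the PTE, low 12 the byte. */
	uint32_t pgd_index = va >> PGDIR_SHIFT;
	uint32_t pte_index = (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	uint32_t offset    = va & ((1u << PAGE_SHIFT) - 1);

	printf("pgd=%u pte=%u off=0x%03x\n",
	       (unsigned)pgd_index, (unsigned)pte_index, (unsigned)offset);
	return 0;
}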
@@ -53,24 +54,26 @@
  */
 #define PTRS_PER_PTE		1024
 #define PTRS_PER_PTE_SHIFT	10
-#define PTRS_PER_PMD		1
 #define PTRS_PER_PGD		1024
 #define PGD_ORDER		0
-#define PMD_ORDER		0
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
 #define FIRST_USER_ADDRESS	0
 #define FIRST_USER_PGD_NR	(FIRST_USER_ADDRESS >> PGDIR_SHIFT)
 
-/* virtual memory area. We keep a distance to other memory regions to be
+/*
+ * Virtual memory area. We keep a distance to other memory regions to be
  * on the safe side. We also use this area for cache aliasing.
  */
 
-// FIXME: virtual memory area must be configuration-dependent
-
 #define VMALLOC_START		0xC0000000
-#define VMALLOC_END		0xC7FF0000
+#define VMALLOC_END		0xC6FEFFFF
+#define TLBTEMP_BASE_1		0xC6FF0000
+#define TLBTEMP_BASE_2		0xC6FF8000
+#define MODULE_START		0xC7000000
+#define MODULE_END		0xC7FFFFFF
 
-/* Xtensa Linux config PTE layout (when present):
+/*
+ * Xtensa Linux config PTE layout (when present):
  *	31-12:	PPN
  *	11-6:	Software
  *	5-4:	RING
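The new constants replace the single large vmalloc window with distinct regions. A compile-time sanity check (illustrative only, not part of the patch) that the vmalloc area, the two TLBTEMP scratch ranges, and the module space tile 0xC0000000-0xC7FFFFFF with no gap or overlap; the 32 kB spacing of the TLBTEMP bases is read off the addresses themselves:

/* A negative array size makes any of these typedefs fail to compile. */
#define VMALLOC_START  0xC0000000u
#define VMALLOC_END    0xC6FEFFFFu
#define TLBTEMP_BASE_1 0xC6FF0000u
#define TLBTEMP_BASE_2 0xC6FF8000u
#define MODULE_START   0xC7000000u
#define MODULE_END     0xC7FFFFFFu

typedef char vmalloc_abuts_tlbtemp[(VMALLOC_END + 1 == TLBTEMP_BASE_1) ? 1 : -1];
typedef char tlbtemp_areas_32k_apart[(TLBTEMP_BASE_2 - TLBTEMP_BASE_1 == 0x8000) ? 1 : -1];
typedef char tlbtemp_abuts_modules[(TLBTEMP_BASE_2 + 0x8000 == MODULE_START) ? 1 : -1];

int main(void) { return 0; }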
@@ -86,47 +89,55 @@
  * See further below for PTE layout for swapped-out pages.
  */
 
-#define _PAGE_VALID		(1<<0)	/* hardware: page is accessible */
-#define _PAGE_WRENABLE		(1<<1)	/* hardware: page is writable */
+#define _PAGE_HW_EXEC		(1<<0)	/* hardware: page is executable */
+#define _PAGE_HW_WRITE		(1<<1)	/* hardware: page is writable */
+
+#define _PAGE_FILE		(1<<1)	/* non-linear mapping, if !present */
+#define _PAGE_PROTNONE		(3<<0)	/* special case for VM_PROT_NONE */
 
 /* None of these cache modes include MP coherency: */
-#define _PAGE_NO_CACHE		(0<<2)	/* bypass, non-speculative */
-#if XCHAL_DCACHE_IS_WRITEBACK
-# define _PAGE_WRITEBACK	(1<<2)	/* write back */
-# define _PAGE_WRITETHRU	(2<<2)	/* write through */
-#else
-# define _PAGE_WRITEBACK	(1<<2)	/* assume write through */
-# define _PAGE_WRITETHRU	(1<<2)
-#endif
-#define _PAGE_NOALLOC		(3<<2)	/* don't allocate cache,if not cached */
-#define _CACHE_MASK		(3<<2)
+#define _PAGE_CA_BYPASS		(0<<2)	/* bypass, non-speculative */
+#define _PAGE_CA_WB		(1<<2)	/* write-back */
+#define _PAGE_CA_WT		(2<<2)	/* write-through */
+#define _PAGE_CA_MASK		(3<<2)
+#define _PAGE_INVALID		(3<<2)
 
 #define _PAGE_USER		(1<<4)	/* user access (ring=1) */
-#define _PAGE_KERNEL		(0<<4)	/* kernel access (ring=0) */
 
 /* Software */
-#define _PAGE_RW		(1<<6)	/* software: page writable */
+#define _PAGE_WRITABLE_BIT	6
+#define _PAGE_WRITABLE		(1<<6)	/* software: page writable */
 #define _PAGE_DIRTY		(1<<7)	/* software: page dirty */
 #define _PAGE_ACCESSED		(1<<8)	/* software: page accessed (read) */
-#define _PAGE_FILE		(1<<9)	/* nonlinear file mapping*/
 
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _CACHE_MASK | _PAGE_DIRTY)
-#define _PAGE_PRESENT	( _PAGE_VALID | _PAGE_WRITEBACK | _PAGE_ACCESSED)
+/* On older HW revisions, we always have to set bit 0 */
+#if XCHAL_HW_VERSION_MAJOR < 2000
+# define _PAGE_VALID		(1<<0)
+#else
+# define _PAGE_VALID		0
+#endif
+
+#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_PRESENT	(_PAGE_VALID | _PAGE_CA_WB | _PAGE_ACCESSED)
 
 #ifdef CONFIG_MMU
 
-# define PAGE_NONE	__pgprot(_PAGE_PRESENT)
-# define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_RW)
-# define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER)
-# define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER)
-# define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_KERNEL | _PAGE_WRENABLE)
-# define PAGE_INVALID	__pgprot(_PAGE_USER)
-
-# if (DCACHE_WAY_SIZE > PAGE_SIZE)
-#  define PAGE_DIRECTORY	__pgprot(_PAGE_VALID | _PAGE_ACCESSED | _PAGE_KERNEL)
-# else
-#  define PAGE_DIRECTORY	__pgprot(_PAGE_PRESENT | _PAGE_KERNEL)
-# endif
+#define PAGE_NONE	   __pgprot(_PAGE_INVALID | _PAGE_USER | _PAGE_PROTNONE)
+#define PAGE_COPY	   __pgprot(_PAGE_PRESENT | _PAGE_USER)
+#define PAGE_COPY_EXEC	   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
+#define PAGE_READONLY	   __pgprot(_PAGE_PRESENT | _PAGE_USER)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_HW_EXEC)
+#define PAGE_SHARED	   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE)
+#define PAGE_SHARED_EXEC \
+	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITABLE | _PAGE_HW_EXEC)
+#define PAGE_KERNEL	   __pgprot(_PAGE_PRESENT | _PAGE_HW_WRITE)
+#define PAGE_KERNEL_EXEC   __pgprot(_PAGE_PRESENT|_PAGE_HW_WRITE|_PAGE_HW_EXEC)
+
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
+# define _PAGE_DIRECTORY	(_PAGE_VALID | _PAGE_ACCESSED)
+#else
+# define _PAGE_DIRECTORY	(_PAGE_VALID | _PAGE_ACCESSED | _PAGE_CA_WB)
+#endif
 
 #else /* no mmu */
 
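With this encoding the low nibble of a PTE carries the cache attribute and, on older cores, the valid bit, so a raw value can be classified with simple mask tests. An illustrative standalone decode (the test value and harness are hypothetical, not from the patch):

#include <stdint.h>
#include <stdio.h>

#define _PAGE_CA_MASK   (3 << 2)
#define _PAGE_INVALID   (3 << 2)
#define _PAGE_USER      (1 << 4)
#define _PAGE_WRITABLE  (1 << 6)

int main(void)
{
	uint32_t pte = 0x1234A000 | _PAGE_USER | _PAGE_WRITABLE;

	printf("ppn=0x%05x invalid=%d user=%d writable=%d\n",
	       (unsigned)(pte >> 12),                   /* bits 31-12: PPN */
	       (pte & _PAGE_CA_MASK) == _PAGE_INVALID,  /* CA field all-ones? */
	       !!(pte & _PAGE_USER),                    /* ring 1 access */
	       !!(pte & _PAGE_WRITABLE));               /* software write bit */
	return 0;
}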
@@ -145,23 +156,23 @@
  * What follows is the closest we can get by reasonable means..
  * See linux/mm/mmap.c for protection_map[] array that uses these definitions.
  */
 #define __P000	PAGE_NONE		/* private --- */
 #define __P001	PAGE_READONLY		/* private --r */
 #define __P010	PAGE_COPY		/* private -w- */
 #define __P011	PAGE_COPY		/* private -wr */
-#define __P100	PAGE_READONLY		/* private x-- */
-#define __P101	PAGE_READONLY		/* private x-r */
-#define __P110	PAGE_COPY		/* private xw- */
-#define __P111	PAGE_COPY		/* private xwr */
+#define __P100	PAGE_READONLY_EXEC	/* private x-- */
+#define __P101	PAGE_READONLY_EXEC	/* private x-r */
+#define __P110	PAGE_COPY_EXEC		/* private xw- */
+#define __P111	PAGE_COPY_EXEC		/* private xwr */
 
 #define __S000	PAGE_NONE		/* shared --- */
 #define __S001	PAGE_READONLY		/* shared --r */
 #define __S010	PAGE_SHARED		/* shared -w- */
 #define __S011	PAGE_SHARED		/* shared -wr */
-#define __S100	PAGE_READONLY		/* shared x-- */
-#define __S101	PAGE_READONLY		/* shared x-r */
-#define __S110	PAGE_SHARED		/* shared xw- */
-#define __S111	PAGE_SHARED		/* shared xwr */
+#define __S100	PAGE_READONLY_EXEC	/* shared x-- */
+#define __S101	PAGE_READONLY_EXEC	/* shared x-r */
+#define __S110	PAGE_SHARED_EXEC	/* shared xw- */
+#define __S111	PAGE_SHARED_EXEC	/* shared xwr */
 
 #ifndef __ASSEMBLY__
 
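For context: mm/mmap.c indexes protection_map[] with the read/write/exec bits plus a shared bit, so a private PROT_READ|PROT_WRITE mapping resolves to __P011 = PAGE_COPY, which lacks _PAGE_WRITABLE and therefore faults on the first write to drive copy-on-write. A hypothetical standalone model of that lookup (names and table are illustrative):

#include <stdio.h>

/* Index bit 0 = read, bit 1 = write, bit 2 = exec, bit 3 = shared,
 * mirroring the __P000..__S111 names above. */
static const char *protection_map[16] = {
	"PAGE_NONE",          "PAGE_READONLY",      "PAGE_COPY",        "PAGE_COPY",
	"PAGE_READONLY_EXEC", "PAGE_READONLY_EXEC", "PAGE_COPY_EXEC",   "PAGE_COPY_EXEC",
	"PAGE_NONE",          "PAGE_READONLY",      "PAGE_SHARED",      "PAGE_SHARED",
	"PAGE_READONLY_EXEC", "PAGE_READONLY_EXEC", "PAGE_SHARED_EXEC", "PAGE_SHARED_EXEC",
};

int main(void)
{
	/* Private read+write (index 3) is COW; shared read+write (index 11) is not. */
	printf("private rw -> %s\n", protection_map[3]);  /* PAGE_COPY */
	printf("shared  rw -> %s\n", protection_map[11]); /* PAGE_SHARED */
	return 0;
}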
@@ -183,35 +194,42 @@ extern pgd_t swapper_pg_dir[PAGE_SIZE/sizeof(pgd_t)];
 #define pmd_page(pmd) virt_to_page(pmd_val(pmd))
 
 /*
- * The following only work if pte_present() is true.
+ * pte status.
  */
-#define pte_none(pte)	 (!(pte_val(pte) ^ _PAGE_USER))
-#define pte_present(pte) (pte_val(pte) & _PAGE_VALID)
+#define pte_none(pte)	 (pte_val(pte) == _PAGE_INVALID)
+#define pte_present(pte)						\
+	(((pte_val(pte) & _PAGE_CA_MASK) != _PAGE_INVALID)		\
+	 || ((pte_val(pte) & _PAGE_PROTNONE) == _PAGE_PROTNONE))
 #define pte_clear(mm,addr,ptep)						\
-	do { update_pte(ptep, __pte(_PAGE_USER)); } while(0)
+	do { update_pte(ptep, __pte(_PAGE_INVALID)); } while(0)
 
 #define pmd_none(pmd)	 (!pmd_val(pmd))
 #define pmd_present(pmd) (pmd_val(pmd) & PAGE_MASK)
-#define pmd_clear(pmdp)	 do { set_pmd(pmdp, __pmd(0)); } while (0)
 #define pmd_bad(pmd)	 (pmd_val(pmd) & ~PAGE_MASK)
+#define pmd_clear(pmdp)	 do { set_pmd(pmdp, __pmd(0)); } while (0)
 
-/* Note: We use the _PAGE_USER bit to indicate write-protect kernel memory */
-
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
-static inline pte_t pte_wrprotect(pte_t pte) { pte_val(pte) &= ~(_PAGE_RW | _PAGE_WRENABLE); return pte; }
-static inline pte_t pte_mkclean(pte_t pte) { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkold(pte_t pte) { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) { pte_val(pte) |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_RW; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte)
+	{ pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
+static inline pte_t pte_mkclean(pte_t pte)
+	{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
+static inline pte_t pte_mkold(pte_t pte)
+	{ pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkdirty(pte_t pte)
+	{ pte_val(pte) |= _PAGE_DIRTY; return pte; }
+static inline pte_t pte_mkyoung(pte_t pte)
+	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkwrite(pte_t pte)
+	{ pte_val(pte) |= _PAGE_WRITABLE; return pte; }
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
  */
+
 #define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
 #define pte_same(a,b)		(pte_val(a) == pte_val(b))
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
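Note how pte_mkclean() now clears _PAGE_HW_WRITE together with _PAGE_DIRTY: the hardware write bit is granted only once a page is both software-writable and dirty, so cleaning a page re-arms the write fault used for dirty tracking. A hypothetical standalone model of that protocol (in the real kernel the grant happens in the fault/TLB-miss path, not inside pte_mkdirty()):

#include <stdint.h>
#include <stdio.h>

#define _PAGE_HW_WRITE  (1 << 1)
#define _PAGE_WRITABLE  (1 << 6)
#define _PAGE_DIRTY     (1 << 7)

static uint32_t mkdirty(uint32_t pte)
{
	pte |= _PAGE_DIRTY;
	if (pte & _PAGE_WRITABLE)       /* model of the fault handler's grant */
		pte |= _PAGE_HW_WRITE;
	return pte;
}

static uint32_t mkclean(uint32_t pte)
{
	/* As in the patched pte_mkclean(): dropping DIRTY also drops HW_WRITE,
	 * so the next store faults and the kernel sees the page redirtied. */
	return pte & ~(_PAGE_DIRTY | _PAGE_HW_WRITE);
}

int main(void)
{
	uint32_t pte = _PAGE_WRITABLE;

	pte = mkdirty(pte);
	printf("dirty: hw_write=%d\n", !!(pte & _PAGE_HW_WRITE)); /* 1 */
	pte = mkclean(pte);
	printf("clean: hw_write=%d\n", !!(pte & _PAGE_HW_WRITE)); /* 0 */
	return 0;
}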
@@ -232,8 +250,9 @@ static inline void update_pte(pte_t *ptep, pte_t pteval)
 {
 	*ptep = pteval;
 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
-	__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (ptep));
+	__asm__ __volatile__ ("dhwb %0, 0" :: "a" (ptep));
 #endif
+
 }
 
 struct mm_struct;
@@ -249,9 +268,6 @@ static inline void
 set_pmd(pmd_t *pmdp, pmd_t pmdval)
 {
 	*pmdp = pmdval;
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
-	__asm__ __volatile__ ("memw; dhwb %0, 0; dsync" :: "a" (pmdp));
-#endif
 }
 
 struct vm_area_struct;
@@ -306,52 +322,34 @@ ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 
 /*
  * Encode and decode a swap entry.
- * Each PTE in a process VM's page table is either:
- *   "present" -- valid and not swapped out, protection bits are meaningful;
- *   "not present" -- which further subdivides in these two cases:
- *      "none" -- no mapping at all; identified by pte_none(), set by pte_clear(
- *      "swapped out" -- the page is swapped out, and the SWP macros below
- *                      are used to store swap file info in the PTE itself.
  *
- * In the Xtensa processor MMU, any PTE entries in user space (or anywhere
- * in virtual memory that can map differently across address spaces)
- * must have a correct ring value that represents the RASID field that
- * is changed when switching address spaces. Eg. such PTE entries cannot
- * be set to ring zero, because that can cause a (global) kernel ASID
- * entry to be created in the TLBs (even with invalid cache attribute),
- * potentially causing a multihit exception when going back to another
- * address space that mapped the same virtual address at another ring.
- *
- * SO: we avoid using ring bits (_PAGE_RING_MASK) in "not present" PTEs.
- * We also avoid using the _PAGE_VALID bit which must be zero for non-present
- * pages.
- *
- * We end up with the following available bits: 1..3 and 7..31.
- * We don't bother with 1..3 for now (we can use them later if needed),
- * and chose to allocate 6 bits for SWP_TYPE and the remaining 19 bits
- * for SWP_OFFSET. At least 5 bits are needed for SWP_TYPE, because it
- * is currently implemented as an index into swap_info[MAX_SWAPFILES]
- * and MAX_SWAPFILES is currently defined as 32 in <linux/swap.h>.
- * However, for some reason all other architectures in the 2.4 kernel
- * reserve either 6, 7, or 8 bits so I'll not detract from that for now. :)
- * SWP_OFFSET is an offset into the swap file in page-size units, so
- * with 4 kB pages, 19 bits supports a maximum swap file size of 2 GB.
- *
- * FIXME: 2 GB isn't very big. Other bits can be used to allow
- * larger swap sizes. In the meantime, it appears relatively easy to get
- * around the 2 GB limitation by simply using multiple swap files.
+ * Format of swap pte:
+ *	bit	   0	MBZ
+ *	bit	   1	page-file (must be zero)
+ *	bits	2 -  3	page hw access mode (must be 11: _PAGE_INVALID)
+ *	bits	4 -  5	ring protection (must be 01: _PAGE_USER)
+ *	bits	6 - 10	swap type (5 bits -> 32 types)
+ *	bits   11 - 31	swap offset / PAGE_SIZE (21 bits -> 8GB)
+ *
+ * Format of file pte:
+ *	bit	   0	MBZ
+ *	bit	   1	page-file (must be one: _PAGE_FILE)
+ *	bits	2 -  3	page hw access mode (must be 11: _PAGE_INVALID)
+ *	bits	4 -  5	ring protection (must be 01: _PAGE_USER)
+ *	bits	6 - 31	file offset / PAGE_SIZE
  */
 
-#define __swp_type(entry)	(((entry).val >> 7) & 0x3f)
-#define __swp_offset(entry)	((entry).val >> 13)
-#define __swp_entry(type,offs)	((swp_entry_t) {((type) << 7) | ((offs) << 13)})
+#define __swp_type(entry)	(((entry).val >> 6) & 0x1f)
+#define __swp_offset(entry)	((entry).val >> 11)
+#define __swp_entry(type,offs)	\
+	((swp_entry_t) {((type) << 6) | ((offs) << 11) | _PAGE_INVALID})
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
-#define PTE_FILE_MAX_BITS	29
-#define pte_to_pgoff(pte)	(pte_val(pte) >> 3)
-#define pgoff_to_pte(off)	((pte_t) { ((off) << 3) | _PAGE_FILE })
+#define PTE_FILE_MAX_BITS	28
+#define pte_to_pgoff(pte)	(pte_val(pte) >> 4)
+#define pgoff_to_pte(off)	\
+	((pte_t) { ((off) << 4) | _PAGE_INVALID | _PAGE_FILE })
-
 
 #endif /* !defined (__ASSEMBLY__) */
 
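Because __swp_entry() ORs in _PAGE_INVALID, a swapped-out PTE always carries the 11 cache-attribute pattern and can never be loaded as a valid TLB mapping. A hypothetical standalone round trip of the new swap macros (simplified to raw integers, without the kernel's swp_entry_t wrapper):

#include <stdint.h>
#include <stdio.h>

#define _PAGE_INVALID          (3 << 2)

/* Standalone copies of the new swap-entry macros (illustrative harness). */
#define __swp_type(val)        (((val) >> 6) & 0x1f)
#define __swp_offset(val)      ((val) >> 11)
#define __swp_entry(type,offs) (((type) << 6) | ((offs) << 11) | _PAGE_INVALID)

int main(void)
{
	uint32_t entry = __swp_entry(3u, 0x12345u); /* type 3, offset 0x12345 pages */

	/* Round trip: type and offset come back unchanged, low nibble stays 0xC. */
	printf("type=%u offset=0x%x low_nibble=0x%x\n",
	       (unsigned)__swp_type(entry),
	       (unsigned)__swp_offset(entry),
	       (unsigned)(entry & 0xf));
	return 0;
}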
@@ -394,13 +392,12 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
  * remap a physical page `pfn' of size `size' with page protection `prot'
  * into virtual address `from'
  */
+
 #define io_remap_pfn_range(vma,from,pfn,size,prot) \
 	remap_pfn_range(vma, from, pfn, size, prot)
 
 
-/* No page table caches to init */
-
-#define pgtable_cache_init() do { } while (0)
+extern void pgtable_cache_init(void);
 
 typedef pte_t *pte_addr_t;
 