| author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
|---|---|---|
| committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
| commit | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch) | |
| tree | a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /arch/sparc/include/asm/pgtable_32.h | |
| parent | 406089d01562f1e2bf9f089fd7637009ebaad589 (diff) | |
Patched in Tegra support.
Diffstat (limited to 'arch/sparc/include/asm/pgtable_32.h')
-rw-r--r-- | arch/sparc/include/asm/pgtable_32.h | 433 |
1 file changed, 223 insertions, 210 deletions
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 6fc13483f70..a790cc65747 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -16,10 +16,12 @@
 #include <linux/spinlock.h>
 #include <linux/swap.h>
 #include <asm/types.h>
+#include <asm/pgtsun4c.h>
 #include <asm/pgtsrmmu.h>
-#include <asm/vaddrs.h>
+#include <asm/vac-ops.h>
 #include <asm/oplib.h>
-#include <asm/cpu_type.h>
+#include <asm/btfixup.h>
+#include <asm/system.h>
 
 
 struct vm_area_struct;
@@ -28,56 +30,89 @@ struct page;
 extern void load_mmu(void);
 extern unsigned long calc_highpages(void);
 
+BTFIXUPDEF_SIMM13(pgdir_shift)
+BTFIXUPDEF_SETHI(pgdir_size)
+BTFIXUPDEF_SETHI(pgdir_mask)
+
+BTFIXUPDEF_SIMM13(ptrs_per_pmd)
+BTFIXUPDEF_SIMM13(ptrs_per_pgd)
+BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
+
 #define pte_ERROR(e) __builtin_trap()
 #define pmd_ERROR(e) __builtin_trap()
 #define pgd_ERROR(e) __builtin_trap()
 
-#define PMD_SHIFT 22
+BTFIXUPDEF_INT(page_none)
+BTFIXUPDEF_INT(page_copy)
+BTFIXUPDEF_INT(page_readonly)
+BTFIXUPDEF_INT(page_kernel)
+
+#define PMD_SHIFT SUN4C_PMD_SHIFT
 #define PMD_SIZE (1UL << PMD_SHIFT)
 #define PMD_MASK (~(PMD_SIZE-1))
 #define PMD_ALIGN(__addr) (((__addr) + ~PMD_MASK) & PMD_MASK)
-#define PGDIR_SHIFT SRMMU_PGDIR_SHIFT
-#define PGDIR_SIZE SRMMU_PGDIR_SIZE
-#define PGDIR_MASK SRMMU_PGDIR_MASK
+#define PGDIR_SHIFT BTFIXUP_SIMM13(pgdir_shift)
+#define PGDIR_SIZE BTFIXUP_SETHI(pgdir_size)
+#define PGDIR_MASK BTFIXUP_SETHI(pgdir_mask)
 #define PTRS_PER_PTE 1024
-#define PTRS_PER_PMD SRMMU_PTRS_PER_PMD
-#define PTRS_PER_PGD SRMMU_PTRS_PER_PGD
-#define USER_PTRS_PER_PGD PAGE_OFFSET / SRMMU_PGDIR_SIZE
+#define PTRS_PER_PMD BTFIXUP_SIMM13(ptrs_per_pmd)
+#define PTRS_PER_PGD BTFIXUP_SIMM13(ptrs_per_pgd)
+#define USER_PTRS_PER_PGD BTFIXUP_SIMM13(user_ptrs_per_pgd)
 #define FIRST_USER_ADDRESS 0
 #define PTE_SIZE (PTRS_PER_PTE*4)
 
-#define PAGE_NONE SRMMU_PAGE_NONE
-#define PAGE_SHARED SRMMU_PAGE_SHARED
-#define PAGE_COPY SRMMU_PAGE_COPY
-#define PAGE_READONLY SRMMU_PAGE_RDONLY
-#define PAGE_KERNEL SRMMU_PAGE_KERNEL
+#define PAGE_NONE __pgprot(BTFIXUP_INT(page_none))
+extern pgprot_t PAGE_SHARED;
+#define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
+#define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
+
+extern unsigned long page_kernel;
 
-/* Top-level page directory - dummy used by init-mm.
- * srmmu.c will assign the real one (which is dynamically sized) */
-#define swapper_pg_dir NULL
+#ifdef MODULE
+#define PAGE_KERNEL page_kernel
+#else
+#define PAGE_KERNEL __pgprot(BTFIXUP_INT(page_kernel))
+#endif
+
+/* Top-level page directory */
+extern pgd_t swapper_pg_dir[1024];
 
 extern void paging_init(void);
 
+/* Page table for 0-4MB for everybody, on the Sparc this
+ * holds the same as on the i386.
+ */
+extern pte_t pg0[1024];
+extern pte_t pg1[1024];
+extern pte_t pg2[1024];
+extern pte_t pg3[1024];
+
 extern unsigned long ptr_in_current_pgd;
 
-/* xwr */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
+/* Here is a trick, since mmap.c need the initializer elements for
+ * protection_map[] to be constant at compile time, I set the following
+ * to all zeros. I set it to the real values after I link in the
+ * appropriate MMU page table routines at boot time.
+ */
+#define __P000 __pgprot(0)
+#define __P001 __pgprot(0)
+#define __P010 __pgprot(0)
+#define __P011 __pgprot(0)
+#define __P100 __pgprot(0)
+#define __P101 __pgprot(0)
+#define __P110 __pgprot(0)
+#define __P111 __pgprot(0)
+
+#define __S000 __pgprot(0)
+#define __S001 __pgprot(0)
+#define __S010 __pgprot(0)
+#define __S011 __pgprot(0)
+#define __S100 __pgprot(0)
+#define __S101 __pgprot(0)
+#define __S110 __pgprot(0)
+#define __S111 __pgprot(0)
+
+extern int num_contexts;
 
 /* First physical page can be anywhere, the following is needed so that
  * va-->pa and vice versa conversions work properly without performance
@@ -87,145 +122,97 @@ extern unsigned long phys_base;
 extern unsigned long pfn_base;
 
 /*
+ * BAD_PAGETABLE is used when we need a bogus page-table, while
+ * BAD_PAGE is used for a bogus page.
+ *
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
  */
+extern pte_t * __bad_pagetable(void);
+extern pte_t __bad_page(void);
 extern unsigned long empty_zero_page;
 
+#define BAD_PAGETABLE __bad_pagetable()
+#define BAD_PAGE __bad_page()
 #define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
 
 /*
- * In general all page table modifications should use the V8 atomic
- * swap instruction. This insures the mmu and the cpu are in sync
- * with respect to ref/mod bits in the page tables.
  */
-static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
-{
-	__asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
-	return value;
-}
+BTFIXUPDEF_CALL_CONST(struct page *, pmd_page, pmd_t)
+BTFIXUPDEF_CALL_CONST(unsigned long, pgd_page_vaddr, pgd_t)
 
-/* Certain architectures need to do special things when pte's
- * within a page table are directly modified. Thus, the following
- * hook is made available.
- */
-
-static inline void set_pte(pte_t *ptep, pte_t pteval)
-{
-	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
-}
-
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
-static inline int srmmu_device_memory(unsigned long x)
-{
-	return ((x & 0xF0000000) != 0);
-}
-
-static inline struct page *pmd_page(pmd_t pmd)
-{
-	if (srmmu_device_memory(pmd_val(pmd)))
-		BUG();
-	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
-}
-
-static inline unsigned long pgd_page_vaddr(pgd_t pgd)
-{
-	if (srmmu_device_memory(pgd_val(pgd))) {
-		return ~0;
-	} else {
-		unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
-		return (unsigned long)__nocache_va(v << 4);
-	}
-}
+#define pmd_page(pmd) BTFIXUP_CALL(pmd_page)(pmd)
+#define pgd_page_vaddr(pgd) BTFIXUP_CALL(pgd_page_vaddr)(pgd)
 
-static inline int pte_present(pte_t pte)
-{
-	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
-}
+BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
+BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
 
 static inline int pte_none(pte_t pte)
 {
 	return !pte_val(pte);
 }
 
-static inline void __pte_clear(pte_t *ptep)
-{
-	set_pte(ptep, __pte(0));
-}
+#define pte_present(pte) BTFIXUP_CALL(pte_present)(pte)
+#define pte_clear(mm,addr,pte) BTFIXUP_CALL(pte_clear)(pte)
 
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	__pte_clear(ptep);
-}
-
-static inline int pmd_bad(pmd_t pmd)
-{
-	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
-}
-
-static inline int pmd_present(pmd_t pmd)
-{
-	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
-}
+BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
+BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
+BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
 
 static inline int pmd_none(pmd_t pmd)
 {
 	return !pmd_val(pmd);
 }
 
-static inline void pmd_clear(pmd_t *pmdp)
-{
-	int i;
-	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
-		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
-}
+#define pmd_bad(pmd) BTFIXUP_CALL(pmd_bad)(pmd)
+#define pmd_present(pmd) BTFIXUP_CALL(pmd_present)(pmd)
+#define pmd_clear(pmd) BTFIXUP_CALL(pmd_clear)(pmd)
 
-static inline int pgd_none(pgd_t pgd)
-{
-	return !(pgd_val(pgd) & 0xFFFFFFF);
-}
-
-static inline int pgd_bad(pgd_t pgd)
-{
-	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
-}
-
-static inline int pgd_present(pgd_t pgd)
-{
-	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
-}
+BTFIXUPDEF_CALL_CONST(int, pgd_none, pgd_t)
+BTFIXUPDEF_CALL_CONST(int, pgd_bad, pgd_t)
+BTFIXUPDEF_CALL_CONST(int, pgd_present, pgd_t)
+BTFIXUPDEF_CALL(void, pgd_clear, pgd_t *)
 
-static inline void pgd_clear(pgd_t *pgdp)
-{
-	set_pte((pte_t *)pgdp, __pte(0));
-}
+#define pgd_none(pgd) BTFIXUP_CALL(pgd_none)(pgd)
+#define pgd_bad(pgd) BTFIXUP_CALL(pgd_bad)(pgd)
+#define pgd_present(pgd) BTFIXUP_CALL(pgd_present)(pgd)
+#define pgd_clear(pgd) BTFIXUP_CALL(pgd_clear)(pgd)
 
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
+BTFIXUPDEF_HALF(pte_writei)
+BTFIXUPDEF_HALF(pte_dirtyi)
+BTFIXUPDEF_HALF(pte_youngi)
+
+static int pte_write(pte_t pte) __attribute_const__;
 static inline int pte_write(pte_t pte)
 {
-	return pte_val(pte) & SRMMU_WRITE;
+	return pte_val(pte) & BTFIXUP_HALF(pte_writei);
 }
 
+static int pte_dirty(pte_t pte) __attribute_const__;
 static inline int pte_dirty(pte_t pte)
 {
-	return pte_val(pte) & SRMMU_DIRTY;
+	return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
 }
 
+static int pte_young(pte_t pte) __attribute_const__;
 static inline int pte_young(pte_t pte)
 {
-	return pte_val(pte) & SRMMU_REF;
+	return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
 }
 
 /*
  * The following only work if pte_present() is not true.
  */
+BTFIXUPDEF_HALF(pte_filei)
+
+static int pte_file(pte_t pte) __attribute_const__;
 static inline int pte_file(pte_t pte)
 {
-	return pte_val(pte) & SRMMU_FILE;
+	return pte_val(pte) & BTFIXUP_HALF(pte_filei);
 }
 
 static inline int pte_special(pte_t pte)
@@ -233,85 +220,68 @@ static inline int pte_special(pte_t pte)
 	return 0;
 }
 
+/*
+ */
+BTFIXUPDEF_HALF(pte_wrprotecti)
+BTFIXUPDEF_HALF(pte_mkcleani)
+BTFIXUPDEF_HALF(pte_mkoldi)
+
+static pte_t pte_wrprotect(pte_t pte) __attribute_const__;
 static inline pte_t pte_wrprotect(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~SRMMU_WRITE);
+	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
 }
 
+static pte_t pte_mkclean(pte_t pte) __attribute_const__;
 static inline pte_t pte_mkclean(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
+	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
 }
 
+static pte_t pte_mkold(pte_t pte) __attribute_const__;
 static inline pte_t pte_mkold(pte_t pte)
 {
-	return __pte(pte_val(pte) & ~SRMMU_REF);
-}
-
-static inline pte_t pte_mkwrite(pte_t pte)
-{
-	return __pte(pte_val(pte) | SRMMU_WRITE);
+	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
 }
 
-static inline pte_t pte_mkdirty(pte_t pte)
-{
-	return __pte(pte_val(pte) | SRMMU_DIRTY);
-}
+BTFIXUPDEF_CALL_CONST(pte_t, pte_mkwrite, pte_t)
+BTFIXUPDEF_CALL_CONST(pte_t, pte_mkdirty, pte_t)
+BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
 
-static inline pte_t pte_mkyoung(pte_t pte)
-{
-	return __pte(pte_val(pte) | SRMMU_REF);
-}
+#define pte_mkwrite(pte) BTFIXUP_CALL(pte_mkwrite)(pte)
+#define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
+#define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)
 
 #define pte_mkspecial(pte) (pte)
 
 #define pfn_pte(pfn, prot) mk_pte(pfn_to_page(pfn), prot)
 
-static inline unsigned long pte_pfn(pte_t pte)
-{
-	if (srmmu_device_memory(pte_val(pte))) {
-		/* Just return something that will cause
-		 * pfn_valid() to return false. This makes
-		 * copy_one_pte() to just directly copy to
-		 * PTE over.
-		 */
-		return ~0UL;
-	}
-	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
-}
-
+BTFIXUPDEF_CALL(unsigned long, pte_pfn, pte_t)
+#define pte_pfn(pte) BTFIXUP_CALL(pte_pfn)(pte)
 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  */
-static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
-{
-	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
-}
+BTFIXUPDEF_CALL_CONST(pte_t, mk_pte, struct page *, pgprot_t)
 
-static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
-{
-	return __pte(((page) >> 4) | pgprot_val(pgprot));
-}
+BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_phys, unsigned long, pgprot_t)
+BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
+BTFIXUPDEF_CALL_CONST(pgprot_t, pgprot_noncached, pgprot_t)
 
-static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
-{
-	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
-}
+#define mk_pte(page,pgprot) BTFIXUP_CALL(mk_pte)(page,pgprot)
+#define mk_pte_phys(page,pgprot) BTFIXUP_CALL(mk_pte_phys)(page,pgprot)
+#define mk_pte_io(page,pgprot,space) BTFIXUP_CALL(mk_pte_io)(page,pgprot,space)
 
-#define pgprot_noncached pgprot_noncached
-static inline pgprot_t pgprot_noncached(pgprot_t prot)
-{
-	prot &= ~__pgprot(SRMMU_CACHE);
-	return prot;
-}
+#define pgprot_noncached(pgprot) BTFIXUP_CALL(pgprot_noncached)(pgprot)
+
+BTFIXUPDEF_INT(pte_modify_mask)
 
 static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
+	return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
 		pgprot_val(newprot));
 }
 
@@ -324,76 +294,114 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
 /* Find an entry in the second-level page table.. */
-static inline pmd_t *pmd_offset(pgd_t * dir, unsigned long address)
-{
-	return (pmd_t *) pgd_page_vaddr(*dir) +
-		((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
-}
+BTFIXUPDEF_CALL(pmd_t *, pmd_offset, pgd_t *, unsigned long)
+#define pmd_offset(dir,addr) BTFIXUP_CALL(pmd_offset)(dir,addr)
 
 /* Find an entry in the third-level page table.. */
-pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address);
+BTFIXUPDEF_CALL(pte_t *, pte_offset_kernel, pmd_t *, unsigned long)
+#define pte_offset_kernel(dir,addr) BTFIXUP_CALL(pte_offset_kernel)(dir,addr)
 
 /*
- * This shortcut works on sun4m (and sun4d) because the nocache area is static.
+ * This shortcut works on sun4m (and sun4d) because the nocache area is static,
+ * and sun4c is guaranteed to have no highmem anyway.
  */
 #define pte_offset_map(d, a) pte_offset_kernel(d,a)
 #define pte_unmap(pte) do{}while(0)
 
+/* Certain architectures need to do special things when pte's
+ * within a page table are directly modified. Thus, the following
+ * hook is made available.
+ */
+
+BTFIXUPDEF_CALL(void, set_pte, pte_t *, pte_t)
+
+#define set_pte(ptep,pteval) BTFIXUP_CALL(set_pte)(ptep,pteval)
+#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
+
 struct seq_file;
-void mmu_info(struct seq_file *m);
+BTFIXUPDEF_CALL(void, mmu_info, struct seq_file *)
+
+#define mmu_info(p) BTFIXUP_CALL(mmu_info)(p)
 
 /* Fault handler stuff... */
 #define FAULT_CODE_PROT     0x1
 #define FAULT_CODE_WRITE    0x2
 #define FAULT_CODE_USER     0x4
 
-#define update_mmu_cache(vma, address, ptep) do { } while (0)
+BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t *)
 
-void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
-		      unsigned long xva, unsigned int len);
-void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);
+#define update_mmu_cache(vma,addr,ptep) BTFIXUP_CALL(update_mmu_cache)(vma,addr,ptep)
 
-/* Encode and de-code a swap entry */
-static inline unsigned long __swp_type(swp_entry_t entry)
-{
-	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
-}
+BTFIXUPDEF_CALL(void, sparc_mapiorange, unsigned int, unsigned long,
+    unsigned long, unsigned int)
+BTFIXUPDEF_CALL(void, sparc_unmapiorange, unsigned long, unsigned int)
+#define sparc_mapiorange(bus,pa,va,len) BTFIXUP_CALL(sparc_mapiorange)(bus,pa,va,len)
+#define sparc_unmapiorange(va,len) BTFIXUP_CALL(sparc_unmapiorange)(va,len)
 
-static inline unsigned long __swp_offset(swp_entry_t entry)
-{
-	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
-}
+extern int invalid_segment;
 
-static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
-{
-	return (swp_entry_t) {
-		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
-		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
-}
+/* Encode and de-code a swap entry */
+BTFIXUPDEF_CALL(unsigned long, __swp_type, swp_entry_t)
+BTFIXUPDEF_CALL(unsigned long, __swp_offset, swp_entry_t)
+BTFIXUPDEF_CALL(swp_entry_t, __swp_entry, unsigned long, unsigned long)
+
+#define __swp_type(__x) BTFIXUP_CALL(__swp_type)(__x)
+#define __swp_offset(__x) BTFIXUP_CALL(__swp_offset)(__x)
+#define __swp_entry(__type,__off) BTFIXUP_CALL(__swp_entry)(__type,__off)
 
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
 
 /* file-offset-in-pte helpers */
-static inline unsigned long pte_to_pgoff(pte_t pte)
-{
-	return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
-}
+BTFIXUPDEF_CALL(unsigned long, pte_to_pgoff, pte_t pte);
+BTFIXUPDEF_CALL(pte_t, pgoff_to_pte, unsigned long pgoff);
 
-static inline pte_t pgoff_to_pte(unsigned long pgoff)
-{
-	return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
-}
+#define pte_to_pgoff(pte) BTFIXUP_CALL(pte_to_pgoff)(pte)
+#define pgoff_to_pte(off) BTFIXUP_CALL(pgoff_to_pte)(off)
 
 /*
  * This is made a constant because mm/fremap.c required a constant.
+ * Note that layout of these bits is different between sun4c.c and srmmu.c.
  */
 #define PTE_FILE_MAX_BITS 24
 
+/*
+ */
+struct ctx_list {
+	struct ctx_list *next;
+	struct ctx_list *prev;
+	unsigned int ctx_number;
+	struct mm_struct *ctx_mm;
+};
+
+extern struct ctx_list *ctx_list_pool;  /* Dynamically allocated */
+extern struct ctx_list ctx_free;        /* Head of free list */
+extern struct ctx_list ctx_used;        /* Head of used contexts list */
+
+#define NO_CONTEXT     -1
+
+static inline void remove_from_ctx_list(struct ctx_list *entry)
+{
+	entry->next->prev = entry->prev;
+	entry->prev->next = entry->next;
+}
+
+static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
+{
+	entry->next = head;
+	(entry->prev = head->prev)->next = entry;
+	head->prev = entry;
+}
+#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
+#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
+
 static inline unsigned long
 __get_phys (unsigned long addr)
 {
 	switch (sparc_cpu_model){
+	case sun4:
+	case sun4c:
+		return sun4c_get_pte (addr) << PAGE_SHIFT;
 	case sun4m:
 	case sun4d:
 		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
@@ -406,6 +414,9 @@ static inline int
 __get_iospace (unsigned long addr)
 {
 	switch (sparc_cpu_model){
+	case sun4:
+	case sun4c:
+		return -1; /* Don't check iospace on sun4c */
 	case sun4m:
 	case sun4d:
 		return (srmmu_get_pte (addr) >> 28);
@@ -452,7 +463,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 	set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
 	flush_tlb_page(__vma, __address); \
 	} \
-	__changed; \
+	(sparc_cpu_model == sun4c) || __changed; \
 })
 
 #include <asm-generic/pgtable.h>
@@ -460,8 +471,10 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 #endif /* !(__ASSEMBLY__) */
 
 #define VMALLOC_START           _AC(0xfe600000,UL)
+/* XXX Alter this when I get around to fixing sun4c - Anton */
 #define VMALLOC_END             _AC(0xffc00000,UL)
 
+
 /* We provide our own get_unmapped_area to cope with VA holes for userland */
 #define HAVE_ARCH_UNMAPPED_AREA
 
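Most of the churn in this diff swaps direct SRMMU inline helpers for btfixup call sites (BTFIXUPDEF_CALL / BTFIXUP_CALL), the sparc32 boot-time patching scheme that let a single kernel binary serve both sun4c and SRMMU MMUs. As a rough analogy only — the real mechanism patches the call and load instructions in place at boot rather than going through pointers, and every name in this sketch is invented for illustration — the idea behaves like selecting one MMU backend once at startup and routing all later callers through that choice:

/* Illustrative sketch only -- not the kernel's actual btfixup implementation.
 * A function-pointer slot filled in once at boot is the closest portable
 * analogy to instruction patching. All names here are hypothetical. */
#include <stdio.h>

typedef unsigned long pte_raw_t;

/* Two stand-in MMU backends; a given machine uses exactly one. */
static int srmmu_pte_present(pte_raw_t v) { return (v & 0x3) == 0x2; }
static int sun4c_pte_present(pte_raw_t v) { return (v & 0x80000000UL) != 0; }

/* The "fixed up" slot: written once while booting, constant afterwards. */
static int (*fixed_pte_present)(pte_raw_t);

/* Plays the role of a BTFIXUP_CALL() call site. */
#define pte_present(v) fixed_pte_present(v)

static void load_mmu_sketch(int is_srmmu)
{
	/* Plays the role of the fixup done while probing the MMU. */
	fixed_pte_present = is_srmmu ? srmmu_pte_present : sun4c_pte_present;
}

int main(void)
{
	load_mmu_sketch(1);                       /* pretend we found an SRMMU */
	printf("pte_present: %d\n", pte_present(0x2));
	return 0;
}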