aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@sunset.davemloft.net>2006-02-12 00:57:54 -0500
committerDavid S. Miller <davem@sunset.davemloft.net>2006-03-20 04:12:25 -0500
commitc4bce90ea2069e5a87beac806de3090ab32128d5 (patch)
tree3983a206c8060ef65ba17945d1c9f69e68d88b3d
parent490384e752a43aa281ed533e9de2da36df25c337 (diff)
[SPARC64]: Deal with PTE layout differences in SUN4V.
Yes, you heard it right, they changed the PTE layout for SUN4V. Ho hum... This is the simple and inefficient way to support this. It'll get optimized, don't worry. Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--arch/sparc64/kernel/itlb_miss.S4
-rw-r--r--arch/sparc64/kernel/ktlb.S12
-rw-r--r--arch/sparc64/kernel/setup.c274
-rw-r--r--arch/sparc64/kernel/sun4v_tlb_miss.S3
-rw-r--r--arch/sparc64/kernel/tsb.S9
-rw-r--r--arch/sparc64/lib/clear_page.S8
-rw-r--r--arch/sparc64/lib/copy_page.S7
-rw-r--r--arch/sparc64/mm/fault.c2
-rw-r--r--arch/sparc64/mm/generic.c40
-rw-r--r--arch/sparc64/mm/init.c703
-rw-r--r--arch/sparc64/mm/tsb.c12
-rw-r--r--include/asm-sparc64/pgtable.h266
12 files changed, 717 insertions, 623 deletions
diff --git a/arch/sparc64/kernel/itlb_miss.S b/arch/sparc64/kernel/itlb_miss.S
index 97facce27aad..730caa4a1506 100644
--- a/arch/sparc64/kernel/itlb_miss.S
+++ b/arch/sparc64/kernel/itlb_miss.S
@@ -6,9 +6,10 @@
6 nop ! Delay slot (fill me) 6 nop ! Delay slot (fill me)
7 TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry 7 TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry
8 cmp %g4, %g6 ! Compare TAG 8 cmp %g4, %g6 ! Compare TAG
9 sethi %hi(_PAGE_EXEC), %g4 ! Setup exec check 9 sethi %hi(PAGE_EXEC), %g4 ! Setup exec check
10 10
11/* ITLB ** ICACHE line 2: TSB compare and TLB load */ 11/* ITLB ** ICACHE line 2: TSB compare and TLB load */
12 ldx [%g4 + %lo(PAGE_EXEC)], %g4
12 bne,pn %xcc, tsb_miss_itlb ! Miss 13 bne,pn %xcc, tsb_miss_itlb ! Miss
13 mov FAULT_CODE_ITLB, %g3 14 mov FAULT_CODE_ITLB, %g3
14 andcc %g5, %g4, %g0 ! Executable? 15 andcc %g5, %g4, %g0 ! Executable?
@@ -16,7 +17,6 @@
16 nop ! Delay slot, fill me 17 nop ! Delay slot, fill me
17 stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB 18 stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB
18 retry ! Trap done 19 retry ! Trap done
19 nop
20 20
21/* ITLB ** ICACHE line 3: */ 21/* ITLB ** ICACHE line 3: */
22 nop 22 nop
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index 2d333ab4b91b..47dfd45971e8 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -131,16 +131,8 @@ kvmap_dtlb_4v:
131 brgez,pn %g4, kvmap_dtlb_nonlinear 131 brgez,pn %g4, kvmap_dtlb_nonlinear
132 nop 132 nop
133 133
134#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000) 134 sethi %hi(kern_linear_pte_xor), %g2
135#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W) 135 ldx [%g2 + %lo(kern_linear_pte_xor)], %g2
136
137 sethi %uhi(KERN_HIGHBITS), %g2
138 or %g2, %ulo(KERN_HIGHBITS), %g2
139 sllx %g2, 32, %g2
140 or %g2, KERN_LOWBITS, %g2
141
142#undef KERN_HIGHBITS
143#undef KERN_LOWBITS
144 136
145 .globl kvmap_linear_patch 137 .globl kvmap_linear_patch
146kvmap_linear_patch: 138kvmap_linear_patch:
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index f36b257b2e44..ca75f3b26a37 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -64,12 +64,6 @@ struct screen_info screen_info = {
64 16 /* orig-video-points */ 64 16 /* orig-video-points */
65}; 65};
66 66
67/* Typing sync at the prom prompt calls the function pointed to by
68 * the sync callback which I set to the following function.
69 * This should sync all filesystems and return, for now it just
70 * prints out pretty messages and returns.
71 */
72
73void (*prom_palette)(int); 67void (*prom_palette)(int);
74void (*prom_keyboard)(void); 68void (*prom_keyboard)(void);
75 69
@@ -79,263 +73,6 @@ prom_console_write(struct console *con, const char *s, unsigned n)
79 prom_write(s, n); 73 prom_write(s, n);
80} 74}
81 75
82static struct console prom_console = {
83 .name = "prom",
84 .write = prom_console_write,
85 .flags = CON_CONSDEV | CON_ENABLED,
86 .index = -1,
87};
88
89#define PROM_TRUE -1
90#define PROM_FALSE 0
91
92/* Pretty sick eh? */
93int prom_callback(long *args)
94{
95 struct console *cons, *saved_console = NULL;
96 unsigned long flags;
97 char *cmd;
98 extern spinlock_t prom_entry_lock;
99
100 if (!args)
101 return -1;
102 if (!(cmd = (char *)args[0]))
103 return -1;
104
105 /*
106 * The callback can be invoked on the cpu that first dropped
107 * into prom_cmdline after taking the serial interrupt, or on
108 * a slave processor that was smp_captured() if the
109 * administrator has done a switch-cpu inside obp. In either
110 * case, the cpu is marked as in-interrupt. Drop IRQ locks.
111 */
112 irq_exit();
113
114 /* XXX Revisit the locking here someday. This is a debugging
115 * XXX feature so it isnt all that critical. -DaveM
116 */
117 local_irq_save(flags);
118
119 spin_unlock(&prom_entry_lock);
120 cons = console_drivers;
121 while (cons) {
122 unregister_console(cons);
123 cons->flags &= ~(CON_PRINTBUFFER);
124 cons->next = saved_console;
125 saved_console = cons;
126 cons = console_drivers;
127 }
128 register_console(&prom_console);
129 if (!strcmp(cmd, "sync")) {
130 prom_printf("PROM `%s' command...\n", cmd);
131 show_free_areas();
132 if (current->pid != 0) {
133 local_irq_enable();
134 sys_sync();
135 local_irq_disable();
136 }
137 args[2] = 0;
138 args[args[1] + 3] = -1;
139 prom_printf("Returning to PROM\n");
140 } else if (!strcmp(cmd, "va>tte-data")) {
141 unsigned long ctx, va;
142 unsigned long tte = 0;
143 long res = PROM_FALSE;
144
145 ctx = args[3];
146 va = args[4];
147 if (ctx) {
148 /*
149 * Find process owning ctx, lookup mapping.
150 */
151 struct task_struct *p;
152 struct mm_struct *mm = NULL;
153 pgd_t *pgdp;
154 pud_t *pudp;
155 pmd_t *pmdp;
156 pte_t *ptep;
157 pte_t pte;
158
159 for_each_process(p) {
160 mm = p->mm;
161 if (CTX_NRBITS(mm->context) == ctx)
162 break;
163 }
164 if (!mm ||
165 CTX_NRBITS(mm->context) != ctx)
166 goto done;
167
168 pgdp = pgd_offset(mm, va);
169 if (pgd_none(*pgdp))
170 goto done;
171 pudp = pud_offset(pgdp, va);
172 if (pud_none(*pudp))
173 goto done;
174 pmdp = pmd_offset(pudp, va);
175 if (pmd_none(*pmdp))
176 goto done;
177
178 /* Preemption implicitly disabled by virtue of
179 * being called from inside OBP.
180 */
181 ptep = pte_offset_map(pmdp, va);
182 pte = *ptep;
183 if (pte_present(pte)) {
184 tte = pte_val(pte);
185 res = PROM_TRUE;
186 }
187 pte_unmap(ptep);
188 goto done;
189 }
190
191 if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
192 if (tlb_type == spitfire) {
193 extern unsigned long sparc64_kern_pri_context;
194
195 /* Spitfire Errata #32 workaround */
196 __asm__ __volatile__(
197 "stxa %0, [%1] %2\n\t"
198 "flush %%g6"
199 : /* No outputs */
200 : "r" (sparc64_kern_pri_context),
201 "r" (PRIMARY_CONTEXT),
202 "i" (ASI_DMMU));
203 }
204
205 /*
206 * Locked down tlb entry.
207 */
208
209 if (tlb_type == spitfire) {
210 tte = spitfire_get_dtlb_data(SPITFIRE_HIGHEST_LOCKED_TLBENT);
211 res = PROM_TRUE;
212 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
213 tte = cheetah_get_ldtlb_data(CHEETAH_HIGHEST_LOCKED_TLBENT);
214 res = PROM_TRUE;
215 }
216 goto done;
217 }
218
219 if (va < PGDIR_SIZE) {
220 /*
221 * vmalloc or prom_inherited mapping.
222 */
223 pgd_t *pgdp;
224 pud_t *pudp;
225 pmd_t *pmdp;
226 pte_t *ptep;
227 pte_t pte;
228 int error;
229
230 if ((va >= LOW_OBP_ADDRESS) && (va < HI_OBP_ADDRESS)) {
231 tte = prom_virt_to_phys(va, &error);
232 if (!error)
233 res = PROM_TRUE;
234 goto done;
235 }
236 pgdp = pgd_offset_k(va);
237 if (pgd_none(*pgdp))
238 goto done;
239 pudp = pud_offset(pgdp, va);
240 if (pud_none(*pudp))
241 goto done;
242 pmdp = pmd_offset(pudp, va);
243 if (pmd_none(*pmdp))
244 goto done;
245
246 /* Preemption implicitly disabled by virtue of
247 * being called from inside OBP.
248 */
249 ptep = pte_offset_kernel(pmdp, va);
250 pte = *ptep;
251 if (pte_present(pte)) {
252 tte = pte_val(pte);
253 res = PROM_TRUE;
254 }
255 goto done;
256 }
257
258 if (va < PAGE_OFFSET) {
259 /*
260 * No mappings here.
261 */
262 goto done;
263 }
264
265 if (va & (1UL << 40)) {
266 /*
267 * I/O page.
268 */
269
270 tte = (__pa(va) & _PAGE_PADDR) |
271 _PAGE_VALID | _PAGE_SZ4MB |
272 _PAGE_E | _PAGE_P | _PAGE_W;
273 res = PROM_TRUE;
274 goto done;
275 }
276
277 /*
278 * Normal page.
279 */
280 tte = (__pa(va) & _PAGE_PADDR) |
281 _PAGE_VALID | _PAGE_SZ4MB |
282 _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W;
283 res = PROM_TRUE;
284
285 done:
286 if (res == PROM_TRUE) {
287 args[2] = 3;
288 args[args[1] + 3] = 0;
289 args[args[1] + 4] = res;
290 args[args[1] + 5] = tte;
291 } else {
292 args[2] = 2;
293 args[args[1] + 3] = 0;
294 args[args[1] + 4] = res;
295 }
296 } else if (!strcmp(cmd, ".soft1")) {
297 unsigned long tte;
298
299 tte = args[3];
300 prom_printf("%lx:\"%s%s%s%s%s\" ",
301 (tte & _PAGE_SOFT) >> 7,
302 tte & _PAGE_MODIFIED ? "M" : "-",
303 tte & _PAGE_ACCESSED ? "A" : "-",
304 tte & _PAGE_READ ? "W" : "-",
305 tte & _PAGE_WRITE ? "R" : "-",
306 tte & _PAGE_PRESENT ? "P" : "-");
307
308 args[2] = 2;
309 args[args[1] + 3] = 0;
310 args[args[1] + 4] = PROM_TRUE;
311 } else if (!strcmp(cmd, ".soft2")) {
312 unsigned long tte;
313
314 tte = args[3];
315 prom_printf("%lx ", (tte & 0x07FC000000000000UL) >> 50);
316
317 args[2] = 2;
318 args[args[1] + 3] = 0;
319 args[args[1] + 4] = PROM_TRUE;
320 } else {
321 prom_printf("unknown PROM `%s' command...\n", cmd);
322 }
323 unregister_console(&prom_console);
324 while (saved_console) {
325 cons = saved_console;
326 saved_console = cons->next;
327 register_console(cons);
328 }
329 spin_lock(&prom_entry_lock);
330 local_irq_restore(flags);
331
332 /*
333 * Restore in-interrupt status for a resume from obp.
334 */
335 irq_enter();
336 return 0;
337}
338
339unsigned int boot_flags = 0; 76unsigned int boot_flags = 0;
340#define BOOTME_DEBUG 0x1 77#define BOOTME_DEBUG 0x1
341#define BOOTME_SINGLE 0x2 78#define BOOTME_SINGLE 0x2
@@ -483,17 +220,6 @@ char reboot_command[COMMAND_LINE_SIZE];
483 220
484static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; 221static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
485 222
486void register_prom_callbacks(void)
487{
488 prom_setcallback(prom_callback);
489 prom_feval(": linux-va>tte-data 2 \" va>tte-data\" $callback drop ; "
490 "' linux-va>tte-data to va>tte-data");
491 prom_feval(": linux-.soft1 1 \" .soft1\" $callback 2drop ; "
492 "' linux-.soft1 to .soft1");
493 prom_feval(": linux-.soft2 1 \" .soft2\" $callback 2drop ; "
494 "' linux-.soft2 to .soft2");
495}
496
497static void __init per_cpu_patch(void) 223static void __init per_cpu_patch(void)
498{ 224{
499#ifdef CONFIG_SMP 225#ifdef CONFIG_SMP
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
index 597359ced233..950ca74b4a58 100644
--- a/arch/sparc64/kernel/sun4v_tlb_miss.S
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -59,7 +59,8 @@ sun4v_itlb_miss:
59 /* Load TSB tag/pte into %g2/%g3 and compare the tag. */ 59 /* Load TSB tag/pte into %g2/%g3 and compare the tag. */
60 ldda [%g1] ASI_QUAD_LDD_PHYS, %g2 60 ldda [%g1] ASI_QUAD_LDD_PHYS, %g2
61 cmp %g2, %g6 61 cmp %g2, %g6
62 sethi %hi(_PAGE_EXEC), %g7 62 sethi %hi(PAGE_EXEC), %g7
63 ldx [%g7 + %lo(PAGE_EXEC)], %g7
63 bne,a,pn %xcc, tsb_miss_page_table_walk 64 bne,a,pn %xcc, tsb_miss_page_table_walk
64 mov FAULT_CODE_ITLB, %g3 65 mov FAULT_CODE_ITLB, %g3
65 andcc %g3, %g7, %g0 66 andcc %g3, %g7, %g0
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 667dcb077be7..be8f0892d721 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -56,10 +56,11 @@ tsb_reload:
56 /* If it is larger than the base page size, don't 56 /* If it is larger than the base page size, don't
57 * bother putting it into the TSB. 57 * bother putting it into the TSB.
58 */ 58 */
59 srlx %g5, 32, %g2 59 sethi %hi(_PAGE_ALL_SZ_BITS), %g7
60 sethi %hi(_PAGE_ALL_SZ_BITS >> 32), %g7 60 ldx [%g7 + %lo(_PAGE_ALL_SZ_BITS)], %g7
61 and %g2, %g7, %g2 61 and %g5, %g7, %g2
62 sethi %hi(_PAGE_SZBITS >> 32), %g7 62 sethi %hi(_PAGE_SZBITS), %g7
63 ldx [%g7 + %lo(_PAGE_SZBITS)], %g7
63 cmp %g2, %g7 64 cmp %g2, %g7
64 bne,a,pn %xcc, tsb_tlb_reload 65 bne,a,pn %xcc, tsb_tlb_reload
65 TSB_STORE(%g1, %g0) 66 TSB_STORE(%g1, %g0)
diff --git a/arch/sparc64/lib/clear_page.S b/arch/sparc64/lib/clear_page.S
index cdc634bceba0..77e531f6c2a7 100644
--- a/arch/sparc64/lib/clear_page.S
+++ b/arch/sparc64/lib/clear_page.S
@@ -23,9 +23,6 @@
23 * disable preemption during the clear. 23 * disable preemption during the clear.
24 */ 24 */
25 25
26#define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS)
27#define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
28
29 .text 26 .text
30 27
31 .globl _clear_page 28 .globl _clear_page
@@ -44,12 +41,11 @@ clear_user_page: /* %o0=dest, %o1=vaddr */
44 sethi %hi(PAGE_SIZE), %o4 41 sethi %hi(PAGE_SIZE), %o4
45 42
46 sllx %g2, 32, %g2 43 sllx %g2, 32, %g2
47 sethi %uhi(TTE_BITS_TOP), %g3 44 sethi %hi(PAGE_KERNEL_LOCKED), %g3
48 45
49 sllx %g3, 32, %g3 46 ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
50 sub %o0, %g2, %g1 ! paddr 47 sub %o0, %g2, %g1 ! paddr
51 48
52 or %g3, TTE_BITS_BOTTOM, %g3
53 and %o1, %o4, %o0 ! vaddr D-cache alias bit 49 and %o1, %o4, %o0 ! vaddr D-cache alias bit
54 50
55 or %g1, %g3, %g1 ! TTE data 51 or %g1, %g3, %g1 ! TTE data
diff --git a/arch/sparc64/lib/copy_page.S b/arch/sparc64/lib/copy_page.S
index feebb14fd27a..37460666a5c3 100644
--- a/arch/sparc64/lib/copy_page.S
+++ b/arch/sparc64/lib/copy_page.S
@@ -23,8 +23,6 @@
23 * disable preemption during the clear. 23 * disable preemption during the clear.
24 */ 24 */
25 25
26#define TTE_BITS_TOP (_PAGE_VALID | _PAGE_SZBITS)
27#define TTE_BITS_BOTTOM (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W)
28#define DCACHE_SIZE (PAGE_SIZE * 2) 26#define DCACHE_SIZE (PAGE_SIZE * 2)
29 27
30#if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19) 28#if (PAGE_SHIFT == 13) || (PAGE_SHIFT == 19)
@@ -52,13 +50,12 @@ copy_user_page: /* %o0=dest, %o1=src, %o2=vaddr */
52 sethi %hi(PAGE_SIZE), %o3 50 sethi %hi(PAGE_SIZE), %o3
53 51
54 sllx %g2, 32, %g2 52 sllx %g2, 32, %g2
55 sethi %uhi(TTE_BITS_TOP), %g3 53 sethi %hi(PAGE_KERNEL_LOCKED), %g3
56 54
57 sllx %g3, 32, %g3 55 ldx [%g3 + %lo(PAGE_KERNEL_LOCKED)], %g3
58 sub %o0, %g2, %g1 ! dest paddr 56 sub %o0, %g2, %g1 ! dest paddr
59 57
60 sub %o1, %g2, %g2 ! src paddr 58 sub %o1, %g2, %g2 ! src paddr
61 or %g3, TTE_BITS_BOTTOM, %g3
62 59
63 and %o2, %o3, %o0 ! vaddr D-cache alias bit 60 and %o2, %o3, %o0 ! vaddr D-cache alias bit
64 or %g1, %g3, %g1 ! dest TTE data 61 or %g1, %g3, %g1 ! dest TTE data
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 6f0539aa44d0..439a53c1e560 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -137,7 +137,7 @@ static unsigned int get_user_insn(unsigned long tpc)
137 if (!pte_present(pte)) 137 if (!pte_present(pte))
138 goto out; 138 goto out;
139 139
140 pa = (pte_val(pte) & _PAGE_PADDR); 140 pa = (pte_pfn(pte) << PAGE_SHIFT);
141 pa += (tpc & ~PAGE_MASK); 141 pa += (tpc & ~PAGE_MASK);
142 142
143 /* Use phys bypass so we don't pollute dtlb/dcache. */ 143 /* Use phys bypass so we don't pollute dtlb/dcache. */
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 580b63da836b..5fc5c579e35e 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -15,15 +15,6 @@
15#include <asm/page.h> 15#include <asm/page.h>
16#include <asm/tlbflush.h> 16#include <asm/tlbflush.h>
17 17
18static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
19{
20 pte_t pte;
21 pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) &
22 ~(unsigned long)_PAGE_CACHE);
23 pte_val(pte) |= (((unsigned long)space) << 32);
24 return pte;
25}
26
27/* Remap IO memory, the same way as remap_pfn_range(), but use 18/* Remap IO memory, the same way as remap_pfn_range(), but use
28 * the obio memory space. 19 * the obio memory space.
29 * 20 *
@@ -48,24 +39,29 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte,
48 pte_t entry; 39 pte_t entry;
49 unsigned long curend = address + PAGE_SIZE; 40 unsigned long curend = address + PAGE_SIZE;
50 41
51 entry = mk_pte_io(offset, prot, space); 42 entry = mk_pte_io(offset, prot, space, PAGE_SIZE);
52 if (!(address & 0xffff)) { 43 if (!(address & 0xffff)) {
53 if (!(address & 0x3fffff) && !(offset & 0x3ffffe) && end >= address + 0x400000) { 44 if (PAGE_SIZE < (4 * 1024 * 1024) &&
54 entry = mk_pte_io(offset, 45 !(address & 0x3fffff) &&
55 __pgprot(pgprot_val (prot) | _PAGE_SZ4MB), 46 !(offset & 0x3ffffe) &&
56 space); 47 end >= address + 0x400000) {
48 entry = mk_pte_io(offset, prot, space,
49 4 * 1024 * 1024);
57 curend = address + 0x400000; 50 curend = address + 0x400000;
58 offset += 0x400000; 51 offset += 0x400000;
59 } else if (!(address & 0x7ffff) && !(offset & 0x7fffe) && end >= address + 0x80000) { 52 } else if (PAGE_SIZE < (512 * 1024) &&
60 entry = mk_pte_io(offset, 53 !(address & 0x7ffff) &&
61 __pgprot(pgprot_val (prot) | _PAGE_SZ512K), 54 !(offset & 0x7fffe) &&
62 space); 55 end >= address + 0x80000) {
56 entry = mk_pte_io(offset, prot, space,
57 512 * 1024 * 1024);
63 curend = address + 0x80000; 58 curend = address + 0x80000;
64 offset += 0x80000; 59 offset += 0x80000;
65 } else if (!(offset & 0xfffe) && end >= address + 0x10000) { 60 } else if (PAGE_SIZE < (64 * 1024) &&
66 entry = mk_pte_io(offset, 61 !(offset & 0xfffe) &&
67 __pgprot(pgprot_val (prot) | _PAGE_SZ64K), 62 end >= address + 0x10000) {
68 space); 63 entry = mk_pte_io(offset, prot, space,
64 64 * 1024);
69 curend = address + 0x10000; 65 curend = address + 0x10000;
70 offset += 0x10000; 66 offset += 0x10000;
71 } else 67 } else
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 92756da273bd..9c2fc239f3ee 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include <linux/config.h> 8#include <linux/config.h>
9#include <linux/module.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <linux/sched.h> 11#include <linux/sched.h>
11#include <linux/string.h> 12#include <linux/string.h>
@@ -118,6 +119,7 @@ unsigned long phys_base __read_mostly;
118unsigned long kern_base __read_mostly; 119unsigned long kern_base __read_mostly;
119unsigned long kern_size __read_mostly; 120unsigned long kern_size __read_mostly;
120unsigned long pfn_base __read_mostly; 121unsigned long pfn_base __read_mostly;
122unsigned long kern_linear_pte_xor __read_mostly;
121 123
122/* get_new_mmu_context() uses "cache + 1". */ 124/* get_new_mmu_context() uses "cache + 1". */
123DEFINE_SPINLOCK(ctx_alloc_lock); 125DEFINE_SPINLOCK(ctx_alloc_lock);
@@ -256,6 +258,9 @@ static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long
256 __tsb_insert(tsb_addr, tag, pte); 258 __tsb_insert(tsb_addr, tag, pte);
257} 259}
258 260
261unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
262unsigned long _PAGE_SZBITS __read_mostly;
263
259void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte) 264void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
260{ 265{
261 struct mm_struct *mm; 266 struct mm_struct *mm;
@@ -398,39 +403,9 @@ struct linux_prom_translation {
398struct linux_prom_translation prom_trans[512] __read_mostly; 403struct linux_prom_translation prom_trans[512] __read_mostly;
399unsigned int prom_trans_ents __read_mostly; 404unsigned int prom_trans_ents __read_mostly;
400 405
401extern unsigned long prom_boot_page;
402extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
403extern int prom_get_mmu_ihandle(void);
404extern void register_prom_callbacks(void);
405
406/* Exported for SMP bootup purposes. */ 406/* Exported for SMP bootup purposes. */
407unsigned long kern_locked_tte_data; 407unsigned long kern_locked_tte_data;
408 408
409/*
410 * Translate PROM's mapping we capture at boot time into physical address.
411 * The second parameter is only set from prom_callback() invocations.
412 */
413unsigned long prom_virt_to_phys(unsigned long promva, int *error)
414{
415 int i;
416
417 for (i = 0; i < prom_trans_ents; i++) {
418 struct linux_prom_translation *p = &prom_trans[i];
419
420 if (promva >= p->virt &&
421 promva < (p->virt + p->size)) {
422 unsigned long base = p->data & _PAGE_PADDR;
423
424 if (error)
425 *error = 0;
426 return base + (promva & (8192 - 1));
427 }
428 }
429 if (error)
430 *error = 1;
431 return 0UL;
432}
433
434/* The obp translations are saved based on 8k pagesize, since obp can 409/* The obp translations are saved based on 8k pagesize, since obp can
435 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS -> 410 * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
436 * HI_OBP_ADDRESS range are handled in ktlb.S. 411 * HI_OBP_ADDRESS range are handled in ktlb.S.
@@ -537,6 +512,8 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr,
537 "3" (arg2), "4" (arg3)); 512 "3" (arg2), "4" (arg3));
538} 513}
539 514
515static unsigned long kern_large_tte(unsigned long paddr);
516
540static void __init remap_kernel(void) 517static void __init remap_kernel(void)
541{ 518{
542 unsigned long phys_page, tte_vaddr, tte_data; 519 unsigned long phys_page, tte_vaddr, tte_data;
@@ -544,9 +521,7 @@ static void __init remap_kernel(void)
544 521
545 tte_vaddr = (unsigned long) KERNBASE; 522 tte_vaddr = (unsigned long) KERNBASE;
546 phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL; 523 phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
547 tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB | 524 tte_data = kern_large_tte(phys_page);
548 _PAGE_CP | _PAGE_CV | _PAGE_P |
549 _PAGE_L | _PAGE_W));
550 525
551 kern_locked_tte_data = tte_data; 526 kern_locked_tte_data = tte_data;
552 527
@@ -591,10 +566,6 @@ static void __init inherit_prom_mappings(void)
591 prom_printf("Remapping the kernel... "); 566 prom_printf("Remapping the kernel... ");
592 remap_kernel(); 567 remap_kernel();
593 prom_printf("done.\n"); 568 prom_printf("done.\n");
594
595 prom_printf("Registering callbacks... ");
596 register_prom_callbacks();
597 prom_printf("done.\n");
598} 569}
599 570
600void prom_world(int enter) 571void prom_world(int enter)
@@ -631,63 +602,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
631} 602}
632#endif /* DCACHE_ALIASING_POSSIBLE */ 603#endif /* DCACHE_ALIASING_POSSIBLE */
633 604
634/* If not locked, zap it. */
635void __flush_tlb_all(void)
636{
637 unsigned long pstate;
638 int i;
639
640 __asm__ __volatile__("flushw\n\t"
641 "rdpr %%pstate, %0\n\t"
642 "wrpr %0, %1, %%pstate"
643 : "=r" (pstate)
644 : "i" (PSTATE_IE));
645 if (tlb_type == spitfire) {
646 for (i = 0; i < 64; i++) {
647 /* Spitfire Errata #32 workaround */
648 /* NOTE: Always runs on spitfire, so no
649 * cheetah+ page size encodings.
650 */
651 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
652 "flush %%g6"
653 : /* No outputs */
654 : "r" (0),
655 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
656
657 if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
658 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
659 "membar #Sync"
660 : /* no outputs */
661 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
662 spitfire_put_dtlb_data(i, 0x0UL);
663 }
664
665 /* Spitfire Errata #32 workaround */
666 /* NOTE: Always runs on spitfire, so no
667 * cheetah+ page size encodings.
668 */
669 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
670 "flush %%g6"
671 : /* No outputs */
672 : "r" (0),
673 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
674
675 if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
676 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
677 "membar #Sync"
678 : /* no outputs */
679 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
680 spitfire_put_itlb_data(i, 0x0UL);
681 }
682 }
683 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
684 cheetah_flush_dtlb_all();
685 cheetah_flush_itlb_all();
686 }
687 __asm__ __volatile__("wrpr %0, 0, %%pstate"
688 : : "r" (pstate));
689}
690
691/* Caller does TLB context flushing on local CPU if necessary. 605/* Caller does TLB context flushing on local CPU if necessary.
692 * The caller also ensures that CTX_VALID(mm->context) is false. 606 * The caller also ensures that CTX_VALID(mm->context) is false.
693 * 607 *
@@ -1180,6 +1094,9 @@ extern void sun4v_patch_tlb_handlers(void);
1180static unsigned long last_valid_pfn; 1094static unsigned long last_valid_pfn;
1181pgd_t swapper_pg_dir[2048]; 1095pgd_t swapper_pg_dir[2048];
1182 1096
1097static void sun4u_pgprot_init(void);
1098static void sun4v_pgprot_init(void);
1099
1183void __init paging_init(void) 1100void __init paging_init(void)
1184{ 1101{
1185 unsigned long end_pfn, pages_avail, shift; 1102 unsigned long end_pfn, pages_avail, shift;
@@ -1188,6 +1105,11 @@ void __init paging_init(void)
1188 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL; 1105 kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
1189 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE; 1106 kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
1190 1107
1108 if (tlb_type == hypervisor)
1109 sun4v_pgprot_init();
1110 else
1111 sun4u_pgprot_init();
1112
1191 if (tlb_type == cheetah_plus || 1113 if (tlb_type == cheetah_plus ||
1192 tlb_type == hypervisor) 1114 tlb_type == hypervisor)
1193 tsb_phys_patch(); 1115 tsb_phys_patch();
@@ -1411,3 +1333,596 @@ void free_initrd_mem(unsigned long start, unsigned long end)
1411 } 1333 }
1412} 1334}
1413#endif 1335#endif
1336
1337/* SUN4U pte bits... */
1338#define _PAGE_SZ4MB_4U 0x6000000000000000 /* 4MB Page */
1339#define _PAGE_SZ512K_4U 0x4000000000000000 /* 512K Page */
1340#define _PAGE_SZ64K_4U 0x2000000000000000 /* 64K Page */
1341#define _PAGE_SZ8K_4U 0x0000000000000000 /* 8K Page */
1342#define _PAGE_NFO_4U 0x1000000000000000 /* No Fault Only */
1343#define _PAGE_IE_4U 0x0800000000000000 /* Invert Endianness */
1344#define _PAGE_SOFT2_4U 0x07FC000000000000 /* Software bits, set 2 */
1345#define _PAGE_RES1_4U 0x0002000000000000 /* Reserved */
1346#define _PAGE_SZ32MB_4U 0x0001000000000000 /* (Panther) 32MB page */
1347#define _PAGE_SZ256MB_4U 0x2001000000000000 /* (Panther) 256MB page */
1348#define _PAGE_SN_4U 0x0000800000000000 /* (Cheetah) Snoop */
1349#define _PAGE_RES2_4U 0x0000780000000000 /* Reserved */
1350#define _PAGE_PADDR_4U 0x000007FFFFFFE000 /* (Cheetah) paddr[42:13] */
1351#define _PAGE_SOFT_4U 0x0000000000001F80 /* Software bits: */
1352#define _PAGE_EXEC_4U 0x0000000000001000 /* Executable SW bit */
1353#define _PAGE_MODIFIED_4U 0x0000000000000800 /* Modified (dirty) */
1354#define _PAGE_FILE_4U 0x0000000000000800 /* Pagecache page */
1355#define _PAGE_ACCESSED_4U 0x0000000000000400 /* Accessed (ref'd) */
1356#define _PAGE_READ_4U 0x0000000000000200 /* Readable SW Bit */
1357#define _PAGE_WRITE_4U 0x0000000000000100 /* Writable SW Bit */
1358#define _PAGE_PRESENT_4U 0x0000000000000080 /* Present */
1359#define _PAGE_L_4U 0x0000000000000040 /* Locked TTE */
1360#define _PAGE_CP_4U 0x0000000000000020 /* Cacheable in P-Cache */
1361#define _PAGE_CV_4U 0x0000000000000010 /* Cacheable in V-Cache */
1362#define _PAGE_E_4U 0x0000000000000008 /* side-Effect */
1363#define _PAGE_P_4U 0x0000000000000004 /* Privileged Page */
1364#define _PAGE_W_4U 0x0000000000000002 /* Writable */
1365
1366/* SUN4V pte bits... */
1367#define _PAGE_NFO_4V 0x4000000000000000 /* No Fault Only */
1368#define _PAGE_SOFT2_4V 0x3F00000000000000 /* Software bits, set 2 */
1369#define _PAGE_MODIFIED_4V 0x2000000000000000 /* Modified (dirty) */
1370#define _PAGE_ACCESSED_4V 0x1000000000000000 /* Accessed (ref'd) */
1371#define _PAGE_READ_4V 0x0800000000000000 /* Readable SW Bit */
1372#define _PAGE_WRITE_4V 0x0400000000000000 /* Writable SW Bit */
1373#define _PAGE_PADDR_4V 0x00FFFFFFFFFFE000 /* paddr[55:13] */
1374#define _PAGE_IE_4V 0x0000000000001000 /* Invert Endianness */
1375#define _PAGE_E_4V 0x0000000000000800 /* side-Effect */
1376#define _PAGE_CP_4V 0x0000000000000400 /* Cacheable in P-Cache */
1377#define _PAGE_CV_4V 0x0000000000000200 /* Cacheable in V-Cache */
1378#define _PAGE_P_4V 0x0000000000000100 /* Privileged Page */
1379#define _PAGE_EXEC_4V 0x0000000000000080 /* Executable Page */
1380#define _PAGE_W_4V 0x0000000000000040 /* Writable */
1381#define _PAGE_SOFT_4V 0x0000000000000030 /* Software bits */
1382#define _PAGE_FILE_4V 0x0000000000000020 /* Pagecache page */
1383#define _PAGE_PRESENT_4V 0x0000000000000010 /* Present */
1384#define _PAGE_RESV_4V 0x0000000000000008 /* Reserved */
1385#define _PAGE_SZ16GB_4V 0x0000000000000007 /* 16GB Page */
1386#define _PAGE_SZ2GB_4V 0x0000000000000006 /* 2GB Page */
1387#define _PAGE_SZ256MB_4V 0x0000000000000005 /* 256MB Page */
1388#define _PAGE_SZ32MB_4V 0x0000000000000004 /* 32MB Page */
1389#define _PAGE_SZ4MB_4V 0x0000000000000003 /* 4MB Page */
1390#define _PAGE_SZ512K_4V 0x0000000000000002 /* 512K Page */
1391#define _PAGE_SZ64K_4V 0x0000000000000001 /* 64K Page */
1392#define _PAGE_SZ8K_4V 0x0000000000000000 /* 8K Page */
1393
1394#if PAGE_SHIFT == 13
1395#define _PAGE_SZBITS_4U _PAGE_SZ8K_4U
1396#define _PAGE_SZBITS_4V _PAGE_SZ8K_4V
1397#elif PAGE_SHIFT == 16
1398#define _PAGE_SZBITS_4U _PAGE_SZ64K_4U
1399#define _PAGE_SZBITS_4V _PAGE_SZ64K_4V
1400#elif PAGE_SHIFT == 19
1401#define _PAGE_SZBITS_4U _PAGE_SZ512K_4U
1402#define _PAGE_SZBITS_4V _PAGE_SZ512K_4V
1403#elif PAGE_SHIFT == 22
1404#define _PAGE_SZBITS_4U _PAGE_SZ4MB_4U
1405#define _PAGE_SZBITS_4V _PAGE_SZ4MB_4V
1406#else
1407#error Wrong PAGE_SHIFT specified
1408#endif
1409
1410#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
1411#define _PAGE_SZHUGE_4U _PAGE_SZ4MB_4U
1412#define _PAGE_SZHUGE_4V _PAGE_SZ4MB_4V
1413#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
1414#define _PAGE_SZHUGE_4U _PAGE_SZ512K_4U
1415#define _PAGE_SZHUGE_4V _PAGE_SZ512K_4V
1416#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
1417#define _PAGE_SZHUGE_4U _PAGE_SZ64K_4U
1418#define _PAGE_SZHUGE_4V _PAGE_SZ64K_4V
1419#endif
1420
1421#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
1422#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
1423#define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
1424#define __DIRTY_BITS_4V (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
1425#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
1426#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
1427
1428pgprot_t PAGE_KERNEL __read_mostly;
1429EXPORT_SYMBOL(PAGE_KERNEL);
1430
1431pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
1432pgprot_t PAGE_COPY __read_mostly;
1433pgprot_t PAGE_EXEC __read_mostly;
1434unsigned long pg_iobits __read_mostly;
1435
1436unsigned long _PAGE_IE __read_mostly;
1437unsigned long _PAGE_E __read_mostly;
1438unsigned long _PAGE_CACHE __read_mostly;
1439
1440static void prot_init_common(unsigned long page_none,
1441 unsigned long page_shared,
1442 unsigned long page_copy,
1443 unsigned long page_readonly,
1444 unsigned long page_exec_bit)
1445{
1446 PAGE_COPY = __pgprot(page_copy);
1447
1448 protection_map[0x0] = __pgprot(page_none);
1449 protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
1450 protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
1451 protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
1452 protection_map[0x4] = __pgprot(page_readonly);
1453 protection_map[0x5] = __pgprot(page_readonly);
1454 protection_map[0x6] = __pgprot(page_copy);
1455 protection_map[0x7] = __pgprot(page_copy);
1456 protection_map[0x8] = __pgprot(page_none);
1457 protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
1458 protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
1459 protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
1460 protection_map[0xc] = __pgprot(page_readonly);
1461 protection_map[0xd] = __pgprot(page_readonly);
1462 protection_map[0xe] = __pgprot(page_shared);
1463 protection_map[0xf] = __pgprot(page_shared);
1464}
1465
1466static void __init sun4u_pgprot_init(void)
1467{
1468 unsigned long page_none, page_shared, page_copy, page_readonly;
1469 unsigned long page_exec_bit;
1470
1471 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
1472 _PAGE_CACHE_4U | _PAGE_P_4U |
1473 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
1474 _PAGE_EXEC_4U);
1475 PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
1476 _PAGE_CACHE_4U | _PAGE_P_4U |
1477 __ACCESS_BITS_4U | __DIRTY_BITS_4U |
1478 _PAGE_EXEC_4U | _PAGE_L_4U);
1479 PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);
1480
1481 _PAGE_IE = _PAGE_IE_4U;
1482 _PAGE_E = _PAGE_E_4U;
1483 _PAGE_CACHE = _PAGE_CACHE_4U;
1484
1485 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
1486 __ACCESS_BITS_4U | _PAGE_E_4U);
1487
1488 kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
1489 0xfffff80000000000;
1490 kern_linear_pte_xor |= (_PAGE_CP_4U | _PAGE_CV_4U |
1491 _PAGE_P_4U | _PAGE_W_4U);
1492
1493 _PAGE_SZBITS = _PAGE_SZBITS_4U;
1494 _PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
1495 _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
1496 _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
1497
1498
1499 page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
1500 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
1501 __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
1502 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
1503 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
1504 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
1505 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
1506
1507 page_exec_bit = _PAGE_EXEC_4U;
1508
1509 prot_init_common(page_none, page_shared, page_copy, page_readonly,
1510 page_exec_bit);
1511}
1512
1513static void __init sun4v_pgprot_init(void)
1514{
1515 unsigned long page_none, page_shared, page_copy, page_readonly;
1516 unsigned long page_exec_bit;
1517
1518 PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
1519 _PAGE_CACHE_4V | _PAGE_P_4V |
1520 __ACCESS_BITS_4V | __DIRTY_BITS_4V |
1521 _PAGE_EXEC_4V);
1522 PAGE_KERNEL_LOCKED = PAGE_KERNEL;
1523 PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);
1524
1525 _PAGE_IE = _PAGE_IE_4V;
1526 _PAGE_E = _PAGE_E_4V;
1527 _PAGE_CACHE = _PAGE_CACHE_4V;
1528
1529 kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
1530 0xfffff80000000000;
1531 kern_linear_pte_xor |= (_PAGE_CP_4V | _PAGE_CV_4V |
1532 _PAGE_P_4V | _PAGE_W_4V);
1533
1534 pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
1535 __ACCESS_BITS_4V | _PAGE_E_4V);
1536
1537 _PAGE_SZBITS = _PAGE_SZBITS_4V;
1538 _PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
1539 _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
1540 _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
1541 _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
1542
1543 page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
1544 page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
1545 __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
1546 page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
1547 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
1548 page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
1549 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
1550
1551 page_exec_bit = _PAGE_EXEC_4V;
1552
1553 prot_init_common(page_none, page_shared, page_copy, page_readonly,
1554 page_exec_bit);
1555}
1556
1557unsigned long pte_sz_bits(unsigned long sz)
1558{
1559 if (tlb_type == hypervisor) {
1560 switch (sz) {
1561 case 8 * 1024:
1562 default:
1563 return _PAGE_SZ8K_4V;
1564 case 64 * 1024:
1565 return _PAGE_SZ64K_4V;
1566 case 512 * 1024:
1567 return _PAGE_SZ512K_4V;
1568 case 4 * 1024 * 1024:
1569 return _PAGE_SZ4MB_4V;
1570 };
1571 } else {
1572 switch (sz) {
1573 case 8 * 1024:
1574 default:
1575 return _PAGE_SZ8K_4U;
1576 case 64 * 1024:
1577 return _PAGE_SZ64K_4U;
1578 case 512 * 1024:
1579 return _PAGE_SZ512K_4U;
1580 case 4 * 1024 * 1024:
1581 return _PAGE_SZ4MB_4U;
1582 };
1583 }
1584}
1585
1586pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
1587{
1588 pte_t pte;
1589 if (tlb_type == hypervisor) {
1590 pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4V) &
1591 ~(unsigned long)_PAGE_CACHE_4V);
1592 } else {
1593 pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4U) &
1594 ~(unsigned long)_PAGE_CACHE_4U);
1595 }
1596 pte_val(pte) |= (((unsigned long)space) << 32);
1597 pte_val(pte) |= pte_sz_bits(page_size);
1598 return pte;
1599}
1600
1601unsigned long pte_present(pte_t pte)
1602{
1603 return (pte_val(pte) &
1604 ((tlb_type == hypervisor) ?
1605 _PAGE_PRESENT_4V : _PAGE_PRESENT_4U));
1606}
1607
1608unsigned long pte_file(pte_t pte)
1609{
1610 return (pte_val(pte) &
1611 ((tlb_type == hypervisor) ?
1612 _PAGE_FILE_4V : _PAGE_FILE_4U));
1613}
1614
1615unsigned long pte_read(pte_t pte)
1616{
1617 return (pte_val(pte) &
1618 ((tlb_type == hypervisor) ?
1619 _PAGE_READ_4V : _PAGE_READ_4U));
1620}
1621
1622unsigned long pte_exec(pte_t pte)
1623{
1624 return (pte_val(pte) &
1625 ((tlb_type == hypervisor) ?
1626 _PAGE_EXEC_4V : _PAGE_EXEC_4U));
1627}
1628
1629unsigned long pte_write(pte_t pte)
1630{
1631 return (pte_val(pte) &
1632 ((tlb_type == hypervisor) ?
1633 _PAGE_WRITE_4V : _PAGE_WRITE_4U));
1634}
1635
1636unsigned long pte_dirty(pte_t pte)
1637{
1638 return (pte_val(pte) &
1639 ((tlb_type == hypervisor) ?
1640 _PAGE_MODIFIED_4V : _PAGE_MODIFIED_4U));
1641}
1642
1643unsigned long pte_young(pte_t pte)
1644{
1645 return (pte_val(pte) &
1646 ((tlb_type == hypervisor) ?
1647 _PAGE_ACCESSED_4V : _PAGE_ACCESSED_4U));
1648}
1649
1650pte_t pte_wrprotect(pte_t pte)
1651{
1652 unsigned long mask = _PAGE_WRITE_4U | _PAGE_W_4U;
1653
1654 if (tlb_type == hypervisor)
1655 mask = _PAGE_WRITE_4V | _PAGE_W_4V;
1656
1657 return __pte(pte_val(pte) & ~mask);
1658}
1659
1660pte_t pte_rdprotect(pte_t pte)
1661{
1662 unsigned long mask = _PAGE_R | _PAGE_READ_4U;
1663
1664 if (tlb_type == hypervisor)
1665 mask = _PAGE_R | _PAGE_READ_4V;
1666
1667 return __pte(pte_val(pte) & ~mask);
1668}
1669
1670pte_t pte_mkclean(pte_t pte)
1671{
1672 unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U;
1673
1674 if (tlb_type == hypervisor)
1675 mask = _PAGE_MODIFIED_4V | _PAGE_W_4V;
1676
1677 return __pte(pte_val(pte) & ~mask);
1678}
1679
1680pte_t pte_mkold(pte_t pte)
1681{
1682 unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U;
1683
1684 if (tlb_type == hypervisor)
1685 mask = _PAGE_R | _PAGE_ACCESSED_4V;
1686
1687 return __pte(pte_val(pte) & ~mask);
1688}
1689
1690pte_t pte_mkyoung(pte_t pte)
1691{
1692 unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U;
1693
1694 if (tlb_type == hypervisor)
1695 mask = _PAGE_R | _PAGE_ACCESSED_4V;
1696
1697 return __pte(pte_val(pte) | mask);
1698}
1699
1700pte_t pte_mkwrite(pte_t pte)
1701{
1702 unsigned long mask = _PAGE_WRITE_4U;
1703
1704 if (tlb_type == hypervisor)
1705 mask = _PAGE_WRITE_4V;
1706
1707 return __pte(pte_val(pte) | mask);
1708}
1709
1710pte_t pte_mkdirty(pte_t pte)
1711{
1712 unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U;
1713
1714 if (tlb_type == hypervisor)
1715 mask = _PAGE_MODIFIED_4V | _PAGE_W_4V;
1716
1717 return __pte(pte_val(pte) | mask);
1718}
1719
1720pte_t pte_mkhuge(pte_t pte)
1721{
1722 unsigned long mask = _PAGE_SZHUGE_4U;
1723
1724 if (tlb_type == hypervisor)
1725 mask = _PAGE_SZHUGE_4V;
1726
1727 return __pte(pte_val(pte) | mask);
1728}
1729
1730pte_t pgoff_to_pte(unsigned long off)
1731{
1732 unsigned long bit = _PAGE_FILE_4U;
1733
1734 if (tlb_type == hypervisor)
1735 bit = _PAGE_FILE_4V;
1736
1737 return __pte((off << PAGE_SHIFT) | bit);
1738}
1739
1740pgprot_t pgprot_noncached(pgprot_t prot)
1741{
1742 unsigned long val = pgprot_val(prot);
1743 unsigned long off = _PAGE_CP_4U | _PAGE_CV_4U;
1744 unsigned long on = _PAGE_E_4U;
1745
1746 if (tlb_type == hypervisor) {
1747 off = _PAGE_CP_4V | _PAGE_CV_4V;
1748 on = _PAGE_E_4V;
1749 }
1750
1751 return __pgprot((val & ~off) | on);
1752}
1753
1754pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
1755{
1756 unsigned long sz_bits = _PAGE_SZBITS_4U;
1757
1758 if (tlb_type == hypervisor)
1759 sz_bits = _PAGE_SZBITS_4V;
1760
1761 return __pte((pfn << PAGE_SHIFT) | pgprot_val(prot) | sz_bits);
1762}
1763
1764unsigned long pte_pfn(pte_t pte)
1765{
1766 unsigned long mask = _PAGE_PADDR_4U;
1767
1768 if (tlb_type == hypervisor)
1769 mask = _PAGE_PADDR_4V;
1770
1771 return (pte_val(pte) & mask) >> PAGE_SHIFT;
1772}
1773
1774pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
1775{
1776 unsigned long preserve_mask;
1777 unsigned long val;
1778
1779 preserve_mask = (_PAGE_PADDR_4U |
1780 _PAGE_MODIFIED_4U |
1781 _PAGE_ACCESSED_4U |
1782 _PAGE_CP_4U |
1783 _PAGE_CV_4U |
1784 _PAGE_E_4U |
1785 _PAGE_PRESENT_4U |
1786 _PAGE_SZBITS_4U);
1787 if (tlb_type == hypervisor)
1788 preserve_mask = (_PAGE_PADDR_4V |
1789 _PAGE_MODIFIED_4V |
1790 _PAGE_ACCESSED_4V |
1791 _PAGE_CP_4V |
1792 _PAGE_CV_4V |
1793 _PAGE_E_4V |
1794 _PAGE_PRESENT_4V |
1795 _PAGE_SZBITS_4V);
1796
1797 val = (pte_val(orig_pte) & preserve_mask);
1798
1799 return __pte(val | (pgprot_val(new_prot) & ~preserve_mask));
1800}
1801
1802static unsigned long kern_large_tte(unsigned long paddr)
1803{
1804 unsigned long val;
1805
1806 val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
1807 _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
1808 _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
1809 if (tlb_type == hypervisor)
1810 val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
1811 _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
1812 _PAGE_EXEC_4V | _PAGE_W_4V);
1813
1814 return val | paddr;
1815}
1816
1817/*
1818 * Translate PROM's mapping we capture at boot time into physical address.
1819 * The second parameter is only set from prom_callback() invocations.
1820 */
1821unsigned long prom_virt_to_phys(unsigned long promva, int *error)
1822{
1823 unsigned long mask;
1824 int i;
1825
1826 mask = _PAGE_PADDR_4U;
1827 if (tlb_type == hypervisor)
1828 mask = _PAGE_PADDR_4V;
1829
1830 for (i = 0; i < prom_trans_ents; i++) {
1831 struct linux_prom_translation *p = &prom_trans[i];
1832
1833 if (promva >= p->virt &&
1834 promva < (p->virt + p->size)) {
1835 unsigned long base = p->data & mask;
1836
1837 if (error)
1838 *error = 0;
1839 return base + (promva & (8192 - 1));
1840 }
1841 }
1842 if (error)
1843 *error = 1;
1844 return 0UL;
1845}
1846
 1847/* XXX We should kill off this ugly thing at some point. XXX */
1848unsigned long sun4u_get_pte(unsigned long addr)
1849{
1850 pgd_t *pgdp;
1851 pud_t *pudp;
1852 pmd_t *pmdp;
1853 pte_t *ptep;
1854 unsigned long mask = _PAGE_PADDR_4U;
1855
1856 if (tlb_type == hypervisor)
1857 mask = _PAGE_PADDR_4V;
1858
1859 if (addr >= PAGE_OFFSET)
1860 return addr & mask;
1861
1862 if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
1863 return prom_virt_to_phys(addr, NULL);
1864
1865 pgdp = pgd_offset_k(addr);
1866 pudp = pud_offset(pgdp, addr);
1867 pmdp = pmd_offset(pudp, addr);
1868 ptep = pte_offset_kernel(pmdp, addr);
1869
1870 return pte_val(*ptep) & mask;
1871}
1872
1873/* If not locked, zap it. */
1874void __flush_tlb_all(void)
1875{
1876 unsigned long pstate;
1877 int i;
1878
1879 __asm__ __volatile__("flushw\n\t"
1880 "rdpr %%pstate, %0\n\t"
1881 "wrpr %0, %1, %%pstate"
1882 : "=r" (pstate)
1883 : "i" (PSTATE_IE));
1884 if (tlb_type == spitfire) {
1885 for (i = 0; i < 64; i++) {
1886 /* Spitfire Errata #32 workaround */
1887 /* NOTE: Always runs on spitfire, so no
1888 * cheetah+ page size encodings.
1889 */
1890 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
1891 "flush %%g6"
1892 : /* No outputs */
1893 : "r" (0),
1894 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
1895
1896 if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
1897 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
1898 "membar #Sync"
1899 : /* no outputs */
1900 : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
1901 spitfire_put_dtlb_data(i, 0x0UL);
1902 }
1903
1904 /* Spitfire Errata #32 workaround */
1905 /* NOTE: Always runs on spitfire, so no
1906 * cheetah+ page size encodings.
1907 */
1908 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
1909 "flush %%g6"
1910 : /* No outputs */
1911 : "r" (0),
1912 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
1913
1914 if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
1915 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
1916 "membar #Sync"
1917 : /* no outputs */
1918 : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
1919 spitfire_put_itlb_data(i, 0x0UL);
1920 }
1921 }
1922 } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
1923 cheetah_flush_dtlb_all();
1924 cheetah_flush_itlb_all();
1925 }
1926 __asm__ __volatile__("wrpr %0, 0, %%pstate"
1927 : : "r" (pstate));
1928}
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index c5dc4b0cc1c5..975242ab88ee 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -85,8 +85,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
85 mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb); 85 mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);
86 86
87 base = TSBMAP_BASE; 87 base = TSBMAP_BASE;
88 tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP | 88 tte = pgprot_val(PAGE_KERNEL_LOCKED);
89 _PAGE_CV | _PAGE_P | _PAGE_W);
90 tsb_paddr = __pa(mm->context.tsb); 89 tsb_paddr = __pa(mm->context.tsb);
91 BUG_ON(tsb_paddr & (tsb_bytes - 1UL)); 90 BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
92 91
@@ -99,55 +98,48 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
99#ifdef DCACHE_ALIASING_POSSIBLE 98#ifdef DCACHE_ALIASING_POSSIBLE
100 base += (tsb_paddr & 8192); 99 base += (tsb_paddr & 8192);
101#endif 100#endif
102 tte |= _PAGE_SZ8K;
103 page_sz = 8192; 101 page_sz = 8192;
104 break; 102 break;
105 103
106 case 8192 << 1: 104 case 8192 << 1:
107 tsb_reg = 0x1UL; 105 tsb_reg = 0x1UL;
108 tte |= _PAGE_SZ64K;
109 page_sz = 64 * 1024; 106 page_sz = 64 * 1024;
110 break; 107 break;
111 108
112 case 8192 << 2: 109 case 8192 << 2:
113 tsb_reg = 0x2UL; 110 tsb_reg = 0x2UL;
114 tte |= _PAGE_SZ64K;
115 page_sz = 64 * 1024; 111 page_sz = 64 * 1024;
116 break; 112 break;
117 113
118 case 8192 << 3: 114 case 8192 << 3:
119 tsb_reg = 0x3UL; 115 tsb_reg = 0x3UL;
120 tte |= _PAGE_SZ64K;
121 page_sz = 64 * 1024; 116 page_sz = 64 * 1024;
122 break; 117 break;
123 118
124 case 8192 << 4: 119 case 8192 << 4:
125 tsb_reg = 0x4UL; 120 tsb_reg = 0x4UL;
126 tte |= _PAGE_SZ512K;
127 page_sz = 512 * 1024; 121 page_sz = 512 * 1024;
128 break; 122 break;
129 123
130 case 8192 << 5: 124 case 8192 << 5:
131 tsb_reg = 0x5UL; 125 tsb_reg = 0x5UL;
132 tte |= _PAGE_SZ512K;
133 page_sz = 512 * 1024; 126 page_sz = 512 * 1024;
134 break; 127 break;
135 128
136 case 8192 << 6: 129 case 8192 << 6:
137 tsb_reg = 0x6UL; 130 tsb_reg = 0x6UL;
138 tte |= _PAGE_SZ512K;
139 page_sz = 512 * 1024; 131 page_sz = 512 * 1024;
140 break; 132 break;
141 133
142 case 8192 << 7: 134 case 8192 << 7:
143 tsb_reg = 0x7UL; 135 tsb_reg = 0x7UL;
144 tte |= _PAGE_SZ4MB;
145 page_sz = 4 * 1024 * 1024; 136 page_sz = 4 * 1024 * 1024;
146 break; 137 break;
147 138
148 default: 139 default:
149 BUG(); 140 BUG();
150 }; 141 };
142 tte |= pte_sz_bits(page_sz);
151 143
152 if (tlb_type == cheetah_plus || tlb_type == hypervisor) { 144 if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
153 /* Physical mapping, no locked TLB entry for TSB. */ 145 /* Physical mapping, no locked TLB entry for TSB. */
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index a480007f0a9d..bd8bce704a9f 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -90,134 +90,48 @@
90 90
91#endif /* !(__ASSEMBLY__) */ 91#endif /* !(__ASSEMBLY__) */
92 92
93/* Spitfire/Cheetah TTE bits. */ 93/* PTE bits which are the same in SUN4U and SUN4V format. */
94#define _PAGE_VALID _AC(0x8000000000000000,UL) /* Valid TTE */ 94#define _PAGE_VALID 0x8000000000000000 /* Valid TTE */
95#define _PAGE_R _AC(0x8000000000000000,UL) /* Keep ref bit up to date*/ 95#define _PAGE_R 0x8000000000000000 /* Keep ref bit up to date*/
96#define _PAGE_SZ4MB _AC(0x6000000000000000,UL) /* 4MB Page */ 96
97#define _PAGE_SZ512K _AC(0x4000000000000000,UL) /* 512K Page */ 97/* These are actually filled in at boot time by sun4{u,v}_pgprot_init() */
98#define _PAGE_SZ64K _AC(0x2000000000000000,UL) /* 64K Page */ 98#define __P000 __pgprot(0)
99#define _PAGE_SZ8K _AC(0x0000000000000000,UL) /* 8K Page */ 99#define __P001 __pgprot(0)
100#define _PAGE_NFO _AC(0x1000000000000000,UL) /* No Fault Only */ 100#define __P010 __pgprot(0)
101#define _PAGE_IE _AC(0x0800000000000000,UL) /* Invert Endianness */ 101#define __P011 __pgprot(0)
102#define _PAGE_SOFT2 _AC(0x07FC000000000000,UL) /* Software bits, set 2 */ 102#define __P100 __pgprot(0)
103#define _PAGE_RES1 _AC(0x0002000000000000,UL) /* Reserved */ 103#define __P101 __pgprot(0)
104#define _PAGE_SZ32MB _AC(0x0001000000000000,UL) /* (Panther) 32MB page */ 104#define __P110 __pgprot(0)
105#define _PAGE_SZ256MB _AC(0x2001000000000000,UL) /* (Panther) 256MB page */ 105#define __P111 __pgprot(0)
106#define _PAGE_SN _AC(0x0000800000000000,UL) /* (Cheetah) Snoop */ 106
107#define _PAGE_RES2 _AC(0x0000780000000000,UL) /* Reserved */ 107#define __S000 __pgprot(0)
108#define _PAGE_PADDR_SF _AC(0x000001FFFFFFE000,UL) /* (Spitfire) paddr[40:13]*/ 108#define __S001 __pgprot(0)
109#define _PAGE_PADDR _AC(0x000007FFFFFFE000,UL) /* (Cheetah) paddr[42:13] */ 109#define __S010 __pgprot(0)
110#define _PAGE_SOFT _AC(0x0000000000001F80,UL) /* Software bits */ 110#define __S011 __pgprot(0)
111#define _PAGE_L _AC(0x0000000000000040,UL) /* Locked TTE */ 111#define __S100 __pgprot(0)
112#define _PAGE_CP _AC(0x0000000000000020,UL) /* Cacheable in P-Cache */ 112#define __S101 __pgprot(0)
113#define _PAGE_CV _AC(0x0000000000000010,UL) /* Cacheable in V-Cache */ 113#define __S110 __pgprot(0)
114#define _PAGE_E _AC(0x0000000000000008,UL) /* side-Effect */ 114#define __S111 __pgprot(0)
115#define _PAGE_P _AC(0x0000000000000004,UL) /* Privileged Page */
116#define _PAGE_W _AC(0x0000000000000002,UL) /* Writable */
117#define _PAGE_G _AC(0x0000000000000001,UL) /* Global */
118
119#define _PAGE_ALL_SZ_BITS \
120 (_PAGE_SZ4MB | _PAGE_SZ512K | _PAGE_SZ64K | \
121 _PAGE_SZ8K | _PAGE_SZ32MB | _PAGE_SZ256MB)
122
123/* Here are the SpitFire software bits we use in the TTE's.
124 *
125 * WARNING: If you are going to try and start using some
126 * of the soft2 bits, you will need to make
127 * modifications to the swap entry implementation.
128 * For example, one thing that could happen is that
129 * swp_entry_to_pte() would BUG_ON() if you tried
130 * to use one of the soft2 bits for _PAGE_FILE.
131 *
132 * Like other architectures, I have aliased _PAGE_FILE with
133 * _PAGE_MODIFIED. This works because _PAGE_FILE is never
134 * interpreted that way unless _PAGE_PRESENT is clear.
135 */
136#define _PAGE_EXEC _AC(0x0000000000001000,UL) /* Executable SW bit */
137#define _PAGE_MODIFIED _AC(0x0000000000000800,UL) /* Modified (dirty) */
138#define _PAGE_FILE _AC(0x0000000000000800,UL) /* Pagecache page */
139#define _PAGE_ACCESSED _AC(0x0000000000000400,UL) /* Accessed (ref'd) */
140#define _PAGE_READ _AC(0x0000000000000200,UL) /* Readable SW Bit */
141#define _PAGE_WRITE _AC(0x0000000000000100,UL) /* Writable SW Bit */
142#define _PAGE_PRESENT _AC(0x0000000000000080,UL) /* Present */
143
144#if PAGE_SHIFT == 13
145#define _PAGE_SZBITS _PAGE_SZ8K
146#elif PAGE_SHIFT == 16
147#define _PAGE_SZBITS _PAGE_SZ64K
148#elif PAGE_SHIFT == 19
149#define _PAGE_SZBITS _PAGE_SZ512K
150#elif PAGE_SHIFT == 22
151#define _PAGE_SZBITS _PAGE_SZ4MB
152#else
153#error Wrong PAGE_SHIFT specified
154#endif
155
156#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
157#define _PAGE_SZHUGE _PAGE_SZ4MB
158#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
159#define _PAGE_SZHUGE _PAGE_SZ512K
160#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
161#define _PAGE_SZHUGE _PAGE_SZ64K
162#endif
163
164#define _PAGE_CACHE (_PAGE_CP | _PAGE_CV)
165
166#define __DIRTY_BITS (_PAGE_MODIFIED | _PAGE_WRITE | _PAGE_W)
167#define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_READ | _PAGE_R)
168#define __PRIV_BITS _PAGE_P
169
170#define PAGE_NONE __pgprot (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_CACHE)
171
172/* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */
173#define PAGE_SHARED __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
174 __ACCESS_BITS | _PAGE_WRITE | _PAGE_EXEC)
175
176#define PAGE_COPY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
177 __ACCESS_BITS | _PAGE_EXEC)
178
179#define PAGE_READONLY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
180 __ACCESS_BITS | _PAGE_EXEC)
181
182#define PAGE_KERNEL __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
183 __PRIV_BITS | \
184 __ACCESS_BITS | __DIRTY_BITS | _PAGE_EXEC)
185
186#define PAGE_SHARED_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \
187 _PAGE_CACHE | \
188 __ACCESS_BITS | _PAGE_WRITE)
189
190#define PAGE_COPY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \
191 _PAGE_CACHE | __ACCESS_BITS)
192
193#define PAGE_READONLY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | \
194 _PAGE_CACHE | __ACCESS_BITS)
195
196#define _PFN_MASK _PAGE_PADDR
197
198#define pg_iobits (_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | \
199 __ACCESS_BITS | _PAGE_E)
200
201#define __P000 PAGE_NONE
202#define __P001 PAGE_READONLY_NOEXEC
203#define __P010 PAGE_COPY_NOEXEC
204#define __P011 PAGE_COPY_NOEXEC
205#define __P100 PAGE_READONLY
206#define __P101 PAGE_READONLY
207#define __P110 PAGE_COPY
208#define __P111 PAGE_COPY
209
210#define __S000 PAGE_NONE
211#define __S001 PAGE_READONLY_NOEXEC
212#define __S010 PAGE_SHARED_NOEXEC
213#define __S011 PAGE_SHARED_NOEXEC
214#define __S100 PAGE_READONLY
215#define __S101 PAGE_READONLY
216#define __S110 PAGE_SHARED
217#define __S111 PAGE_SHARED
218 115
219#ifndef __ASSEMBLY__ 116#ifndef __ASSEMBLY__
220 117
118extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
119
120extern unsigned long pte_sz_bits(unsigned long size);
121
122extern pgprot_t PAGE_KERNEL;
123extern pgprot_t PAGE_KERNEL_LOCKED;
124extern pgprot_t PAGE_COPY;
125
126/* XXX This uglyness is for the atyfb driver's sparc mmap() support. XXX */
127extern unsigned long _PAGE_IE;
128extern unsigned long _PAGE_E;
129extern unsigned long _PAGE_CACHE;
130
131extern unsigned long pg_iobits;
132extern unsigned long _PAGE_ALL_SZ_BITS;
133extern unsigned long _PAGE_SZBITS;
134
221extern unsigned long phys_base; 135extern unsigned long phys_base;
222extern unsigned long pfn_base; 136extern unsigned long pfn_base;
223 137
@@ -229,27 +143,12 @@ extern struct page *mem_map_zero;
229 * the first physical page in the machine is at some huge physical address, 143 * the first physical page in the machine is at some huge physical address,
230 * such as 4GB. This is common on a partitioned E10000, for example. 144 * such as 4GB. This is common on a partitioned E10000, for example.
231 */ 145 */
232 146extern pte_t pfn_pte(unsigned long, pgprot_t);
233#define pfn_pte(pfn, prot) \
234 __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot) | _PAGE_SZBITS)
235#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot)) 147#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
148extern unsigned long pte_pfn(pte_t);
149#define pte_page(x) pfn_to_page(pte_pfn(x))
150extern pte_t pte_modify(pte_t, pgprot_t);
236 151
237#define pte_pfn(x) ((pte_val(x) & _PAGE_PADDR)>>PAGE_SHIFT)
238#define pte_page(x) pfn_to_page(pte_pfn(x))
239
240static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
241{
242 pte_t __pte;
243 const unsigned long preserve_mask = (_PFN_MASK |
244 _PAGE_MODIFIED | _PAGE_ACCESSED |
245 _PAGE_CACHE | _PAGE_E |
246 _PAGE_PRESENT | _PAGE_SZBITS);
247
248 pte_val(__pte) = (pte_val(orig_pte) & preserve_mask) |
249 (pgprot_val(new_prot) & ~preserve_mask);
250
251 return __pte;
252}
253#define pmd_set(pmdp, ptep) \ 152#define pmd_set(pmdp, ptep) \
254 (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL)) 153 (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
255#define pud_set(pudp, pmdp) \ 154#define pud_set(pudp, pmdp) \
@@ -259,8 +158,6 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
259#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd)) 158#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
260#define pud_page(pud) \ 159#define pud_page(pud) \
261 ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL))) 160 ((unsigned long) __va((((unsigned long)pud_val(pud))<<11UL)))
262#define pte_none(pte) (!pte_val(pte))
263#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
264#define pmd_none(pmd) (!pmd_val(pmd)) 161#define pmd_none(pmd) (!pmd_val(pmd))
265#define pmd_bad(pmd) (0) 162#define pmd_bad(pmd) (0)
266#define pmd_present(pmd) (pmd_val(pmd) != 0U) 163#define pmd_present(pmd) (pmd_val(pmd) != 0U)
@@ -270,30 +167,29 @@ static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
270#define pud_present(pud) (pud_val(pud) != 0U) 167#define pud_present(pud) (pud_val(pud) != 0U)
271#define pud_clear(pudp) (pud_val(*(pudp)) = 0U) 168#define pud_clear(pudp) (pud_val(*(pudp)) = 0U)
272 169
170/* Same in both SUN4V and SUN4U. */
171#define pte_none(pte) (!pte_val(pte))
172
173extern unsigned long pte_present(pte_t);
174
273/* The following only work if pte_present() is true. 175/* The following only work if pte_present() is true.
274 * Undefined behaviour if not.. 176 * Undefined behaviour if not..
275 */ 177 */
276#define pte_read(pte) (pte_val(pte) & _PAGE_READ) 178extern unsigned long pte_read(pte_t);
277#define pte_exec(pte) (pte_val(pte) & _PAGE_EXEC) 179extern unsigned long pte_exec(pte_t);
278#define pte_write(pte) (pte_val(pte) & _PAGE_WRITE) 180extern unsigned long pte_write(pte_t);
279#define pte_dirty(pte) (pte_val(pte) & _PAGE_MODIFIED) 181extern unsigned long pte_dirty(pte_t);
280#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED) 182extern unsigned long pte_young(pte_t);
281#define pte_wrprotect(pte) (__pte(pte_val(pte) & ~(_PAGE_WRITE|_PAGE_W))) 183extern pte_t pte_wrprotect(pte_t);
282#define pte_rdprotect(pte) \ 184extern pte_t pte_rdprotect(pte_t);
283 (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_READ)) 185extern pte_t pte_mkclean(pte_t);
284#define pte_mkclean(pte) \ 186extern pte_t pte_mkold(pte_t);
285 (__pte(pte_val(pte) & ~(_PAGE_MODIFIED|_PAGE_W)))
286#define pte_mkold(pte) \
287 (__pte(((pte_val(pte)<<1UL)>>1UL) & ~_PAGE_ACCESSED))
288
289/* Permanent address of a page. */
290#define __page_address(page) page_address(page)
291 187
292/* Be very careful when you change these three, they are delicate. */ 188/* Be very careful when you change these three, they are delicate. */
293#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_R)) 189extern pte_t pte_mkyoung(pte_t);
294#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_WRITE)) 190extern pte_t pte_mkwrite(pte_t);
295#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_MODIFIED | _PAGE_W)) 191extern pte_t pte_mkdirty(pte_t);
296#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_SZHUGE)) 192extern pte_t pte_mkhuge(pte_t);
297 193
298/* to find an entry in a page-table-directory. */ 194/* to find an entry in a page-table-directory. */
299#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) 195#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
@@ -328,6 +224,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
328 224
329 /* It is more efficient to let flush_tlb_kernel_range() 225 /* It is more efficient to let flush_tlb_kernel_range()
330 * handle init_mm tlb flushes. 226 * handle init_mm tlb flushes.
227 *
228 * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
229 * and SUN4V pte layout, so this inline test is fine.
331 */ 230 */
332 if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID)) 231 if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
333 tlb_batch_add(mm, addr, ptep, orig); 232 tlb_batch_add(mm, addr, ptep, orig);
@@ -362,42 +261,23 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
362#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 261#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
363 262
364/* File offset in PTE support. */ 263/* File offset in PTE support. */
365#define pte_file(pte) (pte_val(pte) & _PAGE_FILE) 264extern unsigned long pte_file(pte_t);
366#define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT) 265#define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT)
367#define pgoff_to_pte(off) (__pte(((off) << PAGE_SHIFT) | _PAGE_FILE)) 266extern pte_t pgoff_to_pte(unsigned long);
368#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) 267#define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL)
369 268
370extern unsigned long prom_virt_to_phys(unsigned long, int *); 269extern unsigned long prom_virt_to_phys(unsigned long, int *);
371 270
372static __inline__ unsigned long 271extern unsigned long sun4u_get_pte(unsigned long);
373sun4u_get_pte (unsigned long addr)
374{
375 pgd_t *pgdp;
376 pud_t *pudp;
377 pmd_t *pmdp;
378 pte_t *ptep;
379
380 if (addr >= PAGE_OFFSET)
381 return addr & _PAGE_PADDR;
382 if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
383 return prom_virt_to_phys(addr, NULL);
384 pgdp = pgd_offset_k(addr);
385 pudp = pud_offset(pgdp, addr);
386 pmdp = pmd_offset(pudp, addr);
387 ptep = pte_offset_kernel(pmdp, addr);
388 return pte_val(*ptep) & _PAGE_PADDR;
389}
390 272
391static __inline__ unsigned long 273static inline unsigned long __get_phys(unsigned long addr)
392__get_phys (unsigned long addr)
393{ 274{
394 return sun4u_get_pte (addr); 275 return sun4u_get_pte(addr);
395} 276}
396 277
397static __inline__ int 278static inline int __get_iospace(unsigned long addr)
398__get_iospace (unsigned long addr)
399{ 279{
400 return ((sun4u_get_pte (addr) & 0xf0000000) >> 28); 280 return ((sun4u_get_pte(addr) & 0xf0000000) >> 28);
401} 281}
402 282
403extern unsigned long *sparc64_valid_addr_bitmap; 283extern unsigned long *sparc64_valid_addr_bitmap;
@@ -411,9 +291,7 @@ extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
411 unsigned long size, pgprot_t prot); 291 unsigned long size, pgprot_t prot);
412 292
413/* Clear virtual and physical cachability, set side-effect bit. */ 293/* Clear virtual and physical cachability, set side-effect bit. */
414#define pgprot_noncached(prot) \ 294extern pgprot_t pgprot_noncached(pgprot_t);
415 (__pgprot((pgprot_val(prot) & ~(_PAGE_CP | _PAGE_CV)) | \
416 _PAGE_E))
417 295
418/* 296/*
419 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in 297 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in