commit d257d5da39a78b32721ca84b2ba7f461f2f7ed7f
tree   ac28d377688ebe13a4d38e05f4ff65ba73d8652a
parent 840aaef8db32572b6d11e0d5cb5e6efcbc812000
author    David S. Miller <davem@sunset.davemloft.net>  2006-02-07 02:44:37 -0500
committer David S. Miller <davem@sunset.davemloft.net>  2006-03-20 04:11:52 -0500

[SPARC64]: Initial sun4v TLB miss handling infrastructure.

Things are a little tricky because, unlike sun4u, we have to:

1) do a hypervisor trap to do the TLB load.
2) do the TSB lookup calculations by hand.

Signed-off-by: David S. Miller <davem@davemloft.net>
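[Annotation] The "by hand" TSB lookup that the new handlers implement can be written out in C. The sketch below is distilled from the comments inside sun4v_tlb_miss.S further down; it is illustrative only (the struct and function names are invented for the example), not code from this patch:

	/* Illustrative sketch of the sun4v TSB lookup done "by hand"
	 * in the handlers below; names are invented for the example.
	 */
	struct tsb_ent { unsigned long tag, pte; };	/* 16-byte TSB entry */

	static struct tsb_ent *tsb_entry(unsigned long tsb_reg, unsigned long vaddr)
	{
		/* The low 3 bits of the TSB register encode the TSB size,
		 * (512 << N) entries; the remaining bits are the TSB base.
		 */
		unsigned long index_mask = (512UL << (tsb_reg & 0x7UL)) - 1UL;
		unsigned long tsb_base = tsb_reg & ~0x7UL;
		unsigned long tsb_index = (vaddr >> PAGE_SHIFT) & index_mask;

		return (struct tsb_ent *)(tsb_base + tsb_index * 16);
	}

Each entry's tag is then compared against the TAG TARGET, "(vaddr >> 22) | (ctx << 48)"; on a match the PTE half of the entry is handed to the hypervisor, otherwise the code falls into the page table walk.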
---
 arch/sparc64/kernel/head.S           |   1
 arch/sparc64/kernel/ktlb.S           |  12
 arch/sparc64/kernel/sun4v_tlb_miss.S | 219
 arch/sparc64/kernel/tsb.S            |  89
 arch/sparc64/kernel/vmlinux.lds.S    |   3
 arch/sparc64/mm/init.c               |  24
 include/asm-sparc64/cpudata.h        |   8
 include/asm-sparc64/tsb.h            |  11
 8 files changed, 349 insertions(+), 18 deletions(-)
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 7840271d7aae..03fc0b5b1d98 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -474,6 +474,7 @@ setup_tba:
 sparc64_boot_end:
 
 #include "systbls.S"
+#include "sun4v_tlb_miss.S"
 #include "ktlb.S"
 #include "tsb.S"
 #include "etrap.S"
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index c1335432124e..2e55084a0c12 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -16,12 +16,16 @@
 	.text
 	.align	32
 
-	.globl	kvmap_itlb
 kvmap_itlb:
 	/* g6: TAG TARGET */
 	mov	TLB_TAG_ACCESS, %g4
 	ldxa	[%g4] ASI_IMMU, %g4
 
+	/* sun4v_itlb_miss branches here with the missing virtual
+	 * address already loaded into %g4
+	 */
+kvmap_itlb_4v:
+
 kvmap_itlb_nonlinear:
 	/* Catch kernel NULL pointer calls. */
 	sethi	%hi(PAGE_SIZE), %g5
@@ -94,11 +98,15 @@ kvmap_dtlb_obp:
 	 nop
 
 	.align	32
-	.globl	kvmap_dtlb
 kvmap_dtlb:
 	/* %g6: TAG TARGET */
 	mov	TLB_TAG_ACCESS, %g4
 	ldxa	[%g4] ASI_DMMU, %g4
+
+	/* sun4v_dtlb_miss branches here with the missing virtual
+	 * address already loaded into %g4
+	 */
+kvmap_dtlb_4v:
 	brgez,pn %g4, kvmap_dtlb_nonlinear
 	 nop
 
diff --git a/arch/sparc64/kernel/sun4v_tlb_miss.S b/arch/sparc64/kernel/sun4v_tlb_miss.S
new file mode 100644
index 000000000000..58ea5dd8573c
--- /dev/null
+++ b/arch/sparc64/kernel/sun4v_tlb_miss.S
@@ -0,0 +1,219 @@
+/* sun4v_tlb_miss.S: Sun4v TLB miss handlers.
+ *
+ * Copyright (C) 2006 <davem@davemloft.net>
+ */
+
+	.text
+	.align	32
+
+sun4v_itlb_miss:
+	/* Load CPU ID into %g3. */
+	mov	SCRATCHPAD_CPUID, %g1
+	ldxa	[%g1] ASI_SCRATCHPAD, %g3
+
+	/* Load UTSB reg into %g1. */
+	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1
+
+	/* Load &trap_block[smp_processor_id()] into %g2. */
+	sethi	%hi(trap_block), %g2
+	or	%g2, %lo(trap_block), %g2
+	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
+	add	%g2, %g3, %g2
+
+	/* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6.
+	 * Branch if kernel TLB miss.  The kernel TSB and user TSB miss
+	 * code wants the missing virtual address in %g4, so that value
+	 * cannot be modified through the entirety of this handler.
+	 */
+	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_ADDR_OFFSET], %g4
+	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_I_CTX_OFFSET], %g5
+	srlx	%g4, 22, %g3
+	sllx	%g5, 48, %g6
+	or	%g6, %g3, %g6
+	brz,pn	%g5, kvmap_itlb_4v
+	 nop
+
+	/* Create TSB pointer.  This is something like:
+	 *
+	 * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
+	 * tsb_base = tsb_reg & ~0x7UL;
+	 */
+	and	%g1, 0x7, %g3
+	andn	%g1, 0x7, %g1
+	mov	512, %g7
+	sllx	%g7, %g3, %g7
+	sub	%g7, 1, %g7
+
+	/* TSB index mask is in %g7, tsb base is in %g1.  Compute
+	 * the TSB entry pointer into %g1:
+	 *
+	 * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
+	 * tsb_ptr = tsb_base + (tsb_index * 16);
+	 */
+	srlx	%g4, PAGE_SHIFT, %g3
+	and	%g3, %g7, %g3
+	sllx	%g3, 4, %g3
+	add	%g1, %g3, %g1
+
+	/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
+	ldda	[%g1] ASI_QUAD_LDD_PHYS, %g2
+	cmp	%g2, %g6
+	sethi	%hi(_PAGE_EXEC), %g7
+	bne,a,pn %xcc, tsb_miss_page_table_walk
+	 mov	FAULT_CODE_ITLB, %g3
+	andcc	%g3, %g7, %g0
+	be,a,pn	%xcc, tsb_do_fault
+	 mov	FAULT_CODE_ITLB, %g3
+
+	/* We have a valid entry, make hypervisor call to load
+	 * I-TLB and return from trap.
+	 *
+	 * %g3: PTE
+	 * %g4: vaddr
+	 * %g6: TAG TARGET (only "CTX << 48" part matters)
+	 */
+sun4v_itlb_load:
+	mov	%o0, %g1		! save %o0
+	mov	%o1, %g2		! save %o1
+	mov	%o2, %g5		! save %o2
+	mov	%o3, %g7		! save %o3
+	mov	%g4, %o0		! vaddr
+	srlx	%g6, 48, %o1		! ctx
+	mov	%g3, %o2		! PTE
+	mov	HV_MMU_IMMU, %o3	! flags
+	ta	HV_MMU_MAP_ADDR_TRAP
+	mov	%g1, %o0		! restore %o0
+	mov	%g2, %o1		! restore %o1
+	mov	%g5, %o2		! restore %o2
+	mov	%g7, %o3		! restore %o3
+
+	retry
+
+sun4v_dtlb_miss:
+	/* Load CPU ID into %g3. */
+	mov	SCRATCHPAD_CPUID, %g1
+	ldxa	[%g1] ASI_SCRATCHPAD, %g3
+
+	/* Load UTSB reg into %g1. */
+	ldxa	[%g1 + %g1] ASI_SCRATCHPAD, %g1
+
+	/* Load &trap_block[smp_processor_id()] into %g2. */
+	sethi	%hi(trap_block), %g2
+	or	%g2, %lo(trap_block), %g2
+	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
+	add	%g2, %g3, %g2
+
+	/* Create a TAG TARGET, "(vaddr>>22) | (ctx << 48)", in %g6.
+	 * Branch if kernel TLB miss.  The kernel TSB and user TSB miss
+	 * code wants the missing virtual address in %g4, so that value
+	 * cannot be modified through the entirety of this handler.
+	 */
+	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g4
+	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_CTX_OFFSET], %g5
+	srlx	%g4, 22, %g3
+	sllx	%g5, 48, %g6
+	or	%g6, %g3, %g6
+	brz,pn	%g5, kvmap_dtlb_4v
+	 nop
+
+	/* Create TSB pointer.  This is something like:
+	 *
+	 * index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
+	 * tsb_base = tsb_reg & ~0x7UL;
+	 */
+	and	%g1, 0x7, %g3
+	andn	%g1, 0x7, %g1
+	mov	512, %g7
+	sllx	%g7, %g3, %g7
+	sub	%g7, 1, %g7
+
+	/* TSB index mask is in %g7, tsb base is in %g1.  Compute
+	 * the TSB entry pointer into %g1:
+	 *
+	 * tsb_index = ((vaddr >> PAGE_SHIFT) & tsb_mask);
+	 * tsb_ptr = tsb_base + (tsb_index * 16);
+	 */
+	srlx	%g4, PAGE_SHIFT, %g3
+	and	%g3, %g7, %g3
+	sllx	%g3, 4, %g3
+	add	%g1, %g3, %g1
+
+	/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
+	ldda	[%g1] ASI_QUAD_LDD_PHYS, %g2
+	cmp	%g2, %g6
+	bne,a,pn %xcc, tsb_miss_page_table_walk
+	 mov	FAULT_CODE_DTLB, %g3
+
+	/* We have a valid entry, make hypervisor call to load
+	 * D-TLB and return from trap.
+	 *
+	 * %g3: PTE
+	 * %g4: vaddr
+	 * %g6: TAG TARGET (only "CTX << 48" part matters)
+	 */
+sun4v_dtlb_load:
+	mov	%o0, %g1		! save %o0
+	mov	%o1, %g2		! save %o1
+	mov	%o2, %g5		! save %o2
+	mov	%o3, %g7		! save %o3
+	mov	%g4, %o0		! vaddr
+	srlx	%g6, 48, %o1		! ctx
+	mov	%g3, %o2		! PTE
+	mov	HV_MMU_DMMU, %o3	! flags
+	ta	HV_MMU_MAP_ADDR_TRAP
+	mov	%g1, %o0		! restore %o0
+	mov	%g2, %o1		! restore %o1
+	mov	%g5, %o2		! restore %o2
+	mov	%g7, %o3		! restore %o3
+
+	retry
+
+sun4v_dtlb_prot:
+	/* Load CPU ID into %g3. */
+	mov	SCRATCHPAD_CPUID, %g1
+	ldxa	[%g1] ASI_SCRATCHPAD, %g3
+
+	/* Load &trap_block[smp_processor_id()] into %g2. */
+	sethi	%hi(trap_block), %g2
+	or	%g2, %lo(trap_block), %g2
+	sllx	%g3, TRAP_BLOCK_SZ_SHIFT, %g3
+	add	%g2, %g3, %g2
+
+	ldx	[%g2 + TRAP_PER_CPU_FAULT_INFO + HV_FAULT_D_ADDR_OFFSET], %g5
+	rdpr	%tl, %g1
+	cmp	%g1, 1
+	bgu,pn	%xcc, winfix_trampoline
+	 nop
+	ba,pt	%xcc, sparc64_realfault_common
+	 mov	FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
+
+#define BRANCH_ALWAYS	0x10680000
+#define NOP		0x01000000
+#define SUN4V_DO_PATCH(OLD, NEW)	\
+	sethi	%hi(NEW), %g1;		\
+	or	%g1, %lo(NEW), %g1;	\
+	sethi	%hi(OLD), %g2;		\
+	or	%g2, %lo(OLD), %g2;	\
+	sub	%g1, %g2, %g1;		\
+	sethi	%hi(BRANCH_ALWAYS), %g3; \
+	srl	%g1, 2, %g1;		\
+	or	%g3, %lo(BRANCH_ALWAYS), %g3; \
+	or	%g3, %g1, %g3;		\
+	stw	%g3, [%g2];		\
+	sethi	%hi(NOP), %g3;		\
+	or	%g3, %lo(NOP), %g3;	\
+	stw	%g3, [%g2 + 0x4];	\
+	flush	%g2;
+
+	.globl	sun4v_patch_tlb_handlers
+	.type	sun4v_patch_tlb_handlers,#function
+sun4v_patch_tlb_handlers:
+	SUN4V_DO_PATCH(tl0_iamiss, sun4v_itlb_miss)
+	SUN4V_DO_PATCH(tl1_iamiss, sun4v_itlb_miss)
+	SUN4V_DO_PATCH(tl0_damiss, sun4v_dtlb_miss)
+	SUN4V_DO_PATCH(tl1_damiss, sun4v_dtlb_miss)
+	SUN4V_DO_PATCH(tl0_daprot, sun4v_dtlb_prot)
+	SUN4V_DO_PATCH(tl1_daprot, sun4v_dtlb_prot)
+	retl
+	 nop
+	.size	sun4v_patch_tlb_handlers,.-sun4v_patch_tlb_handlers
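[Annotation] SUN4V_DO_PATCH overwrites the first two instructions of a generic trap-table entry with a branch to the sun4v handler plus a delay-slot nop. A rough C equivalent of what the macro computes is sketched below; like the macro itself, it assumes the distance between the two handlers fits the 19-bit word displacement of the "ba,pt %xcc" encoding:

	/* Rough C equivalent of SUN4V_DO_PATCH; assumes, as the macro
	 * does, that (NEW - OLD) fits the 19-bit "ba,pt %xcc" field.
	 */
	static void sun4v_do_patch(unsigned int *old_insn, unsigned int *new_insn)
	{
		unsigned long disp = ((unsigned long)new_insn -
				      (unsigned long)old_insn) >> 2;

		old_insn[0] = 0x10680000 | disp;	/* ba,pt %xcc, new_insn */
		old_insn[1] = 0x01000000;		/* nop in the delay slot */
		/* the macro then does "flush %g2" to synchronize the I-cache */
	}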
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 96e63168d8b2..818bc9e9135a 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -18,30 +18,33 @@
  * %g4:	available temporary
  * %g5:	available temporary
  * %g6:	TAG TARGET
- * %g7:	physical address base of the linux page
+ * %g7:	available temporary, will be loaded by us with
+ *      the physical address base of the linux page
  *	tables for the current address space
  */
-	.globl	tsb_miss_dtlb
 tsb_miss_dtlb:
 	mov	TLB_TAG_ACCESS, %g4
 	ldxa	[%g4] ASI_DMMU, %g4
 	ba,pt	%xcc, tsb_miss_page_table_walk
 	 nop
 
-	.globl	tsb_miss_itlb
 tsb_miss_itlb:
 	mov	TLB_TAG_ACCESS, %g4
 	ldxa	[%g4] ASI_IMMU, %g4
 	ba,pt	%xcc, tsb_miss_page_table_walk
 	 nop
 
+	/* The sun4v TLB miss handlers jump directly here instead
+	 * of tsb_miss_{d,i}tlb with the missing virtual address
+	 * already loaded into %g4.
+	 */
 tsb_miss_page_table_walk:
 	TRAP_LOAD_PGD_PHYS(%g7, %g5)
 
 	USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)
 
 tsb_reload:
-	TSB_LOCK_TAG(%g1, %g2, %g4)
+	TSB_LOCK_TAG(%g1, %g2, %g7)
 
 	/* Load and check PTE. */
 	ldxa	[%g5] ASI_PHYS_USE_EC, %g5
@@ -52,9 +55,9 @@ tsb_reload:
 	 * bother putting it into the TSB.
 	 */
 	srlx	%g5, 32, %g2
-	sethi	%hi(_PAGE_ALL_SZ_BITS >> 32), %g4
+	sethi	%hi(_PAGE_ALL_SZ_BITS >> 32), %g7
+	and	%g2, %g7, %g2
 	sethi	%hi(_PAGE_SZBITS >> 32), %g7
-	and	%g2, %g4, %g2
 	cmp	%g2, %g7
 	bne,a,pn %xcc, tsb_tlb_reload
 	 TSB_STORE(%g1, %g0)
@@ -68,12 +71,54 @@ tsb_tlb_reload:
 	 nop
 
 tsb_dtlb_load:
-	stxa	%g5, [%g0] ASI_DTLB_DATA_IN
+
+661:	stxa	%g5, [%g0] ASI_DTLB_DATA_IN
 	retry
+	.section	.gl_2insn_patch, "ax"
+	.word	661b
+	nop
+	nop
+	.previous
+
+	/* For sun4v the ASI_DTLB_DATA_IN store and the retry
+	 * instruction get nop'd out and we get here to branch
+	 * to the sun4v tlb load code.  The registers are setup
+	 * as follows:
+	 *
+	 * %g4: vaddr
+	 * %g5: PTE
+	 * %g6: TAG
+	 *
+	 * The sun4v TLB load wants the PTE in %g3 so we fix that
+	 * up here.
+	 */
+	ba,pt	%xcc, sun4v_dtlb_load
+	 mov	%g5, %g3
 
 tsb_itlb_load:
-	stxa	%g5, [%g0] ASI_ITLB_DATA_IN
+
+661:	stxa	%g5, [%g0] ASI_ITLB_DATA_IN
 	retry
+	.section	.gl_2insn_patch, "ax"
+	.word	661b
+	nop
+	nop
+	.previous
+
+	/* For sun4v the ASI_ITLB_DATA_IN store and the retry
+	 * instruction get nop'd out and we get here to branch
+	 * to the sun4v tlb load code.  The registers are setup
+	 * as follows:
+	 *
+	 * %g4: vaddr
+	 * %g5: PTE
+	 * %g6: TAG
+	 *
+	 * The sun4v TLB load wants the PTE in %g3 so we fix that
+	 * up here.
+	 */
+	ba,pt	%xcc, sun4v_itlb_load
+	 mov	%g5, %g3
 
 	/* No valid entry in the page tables, do full fault
 	 * processing.
@@ -95,10 +140,17 @@ tsb_do_fault:
 	 nop
 
 tsb_do_dtlb_fault:
-	rdpr	%tl, %g4
-	cmp	%g4, 1
-	mov	TLB_TAG_ACCESS, %g4
+	rdpr	%tl, %g3
+	cmp	%g3, 1
+
+661:	mov	TLB_TAG_ACCESS, %g4
 	ldxa	[%g4] ASI_DMMU, %g5
+	.section	.gl_2insn_patch, "ax"
+	.word	661b
+	mov	%g4, %g5
+	nop
+	.previous
+
 	be,pt	%xcc, sparc64_realfault_common
 	 mov	FAULT_CODE_DTLB, %g4
 	ba,pt	%xcc, winfix_trampoline
@@ -196,12 +248,23 @@ __tsb_context_switch:
 	add	%g2, %g1, %g2
 	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
 
-	mov	TSB_REG, %g1
+661:	mov	TSB_REG, %g1
 	stxa	%o1, [%g1] ASI_DMMU
+	.section	.gl_2insn_patch, "ax"
+	.word	661b
+	mov	SCRATCHPAD_UTSBREG1, %g1
+	stxa	%o1, [%g1] ASI_SCRATCHPAD
+	.previous
+
 	membar	#Sync
 
-	stxa	%o1, [%g1] ASI_IMMU
+661:	stxa	%o1, [%g1] ASI_IMMU
 	membar	#Sync
+	.section	.gl_2insn_patch, "ax"
+	.word	661b
+	nop
+	nop
+	.previous
 
 	brz	%o2, 9f
 	 nop
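[Annotation] The 661: labels and .gl_2insn_patch sections above record patch sites that boot code rewrites on sun4v, in the same spirit as tsb_phys_patch() in mm/init.c below. A hypothetical apply loop is sketched here, modeled on that function; the gl_2insn_patch_entry layout (an address word followed by the two replacement instruction words) is an assumption made for the illustration, since the struct is declared in cpudata.h but not shown in this diff:

	/* Hypothetical sketch of applying .gl_2insn_patch entries; the
	 * entry layout is assumed, not taken from this patch.
	 */
	struct gl_2insn_patch_entry {
		unsigned int	addr;		/* address of the 661: label */
		unsigned int	insns[2];	/* two replacement words */
	};

	static void apply_gl_2insn_patch(struct gl_2insn_patch_entry *p,
					 struct gl_2insn_patch_entry *end)
	{
		while (p < end) {
			unsigned int *insn = (unsigned int *)(unsigned long)p->addr;

			insn[0] = p->insns[0];
			insn[1] = p->insns[1];
			/* as in tsb_phys_patch(): order the stores, then
			 * flush both modified words from the I-cache
			 */
			wmb();
			__asm__ __volatile__("flush %0" : : "r" (insn));
			__asm__ __volatile__("flush %0" : : "r" (insn + 1));
			p++;
		}
	}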
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 686bf6b3b03f..a09a8a2383dd 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -71,6 +71,9 @@ SECTIONS
 	__con_initcall_end = .;
 	SECURITY_INIT
 	. = ALIGN(4);
+	__tsb_ldquad_phys_patch = .;
+	.tsb_ldquad_phys_patch : { *(.tsb_ldquad_phys_patch) }
+	__tsb_ldquad_phys_patch_end = .;
 	__tsb_phys_patch = .;
 	.tsb_phys_patch : { *(.tsb_phys_patch) }
 	__tsb_phys_patch_end = .;
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index ab50cd9618f3..e9aac424877f 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -1050,8 +1050,25 @@ unsigned long __init find_ecache_flush_span(unsigned long size)
 
 static void __init tsb_phys_patch(void)
 {
+	struct tsb_ldquad_phys_patch_entry *pquad;
 	struct tsb_phys_patch_entry *p;
 
+	pquad = &__tsb_ldquad_phys_patch;
+	while (pquad < &__tsb_ldquad_phys_patch_end) {
+		unsigned long addr = pquad->addr;
+
+		if (tlb_type == hypervisor)
+			*(unsigned int *) addr = pquad->sun4v_insn;
+		else
+			*(unsigned int *) addr = pquad->sun4u_insn;
+		wmb();
+		__asm__ __volatile__("flush	%0"
+				     : /* no outputs */
+				     : "r" (addr));
+
+		pquad++;
+	}
+
 	p = &__tsb_phys_patch;
 	while (p < &__tsb_phys_patch_end) {
 		unsigned long addr = p->addr;
@@ -1069,6 +1086,7 @@ static void __init tsb_phys_patch(void)
 /* paging_init() sets up the page tables */
 
 extern void cheetah_ecache_flush_init(void);
+extern void sun4v_patch_tlb_handlers(void);
 
 static unsigned long last_valid_pfn;
 pgd_t swapper_pg_dir[2048];
@@ -1078,9 +1096,13 @@ void __init paging_init(void)
 	unsigned long end_pfn, pages_avail, shift;
 	unsigned long real_end, i;
 
-	if (tlb_type == cheetah_plus)
+	if (tlb_type == cheetah_plus ||
+	    tlb_type == hypervisor)
 		tsb_phys_patch();
 
+	if (tlb_type == hypervisor)
+		sun4v_patch_tlb_handlers();
+
 	/* Find available physical memory... */
 	read_obp_memory("available", &pavail[0], &pavail_ents);
 
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 998145b92653..a3dc4afc4b21 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -6,6 +6,8 @@
 #ifndef _SPARC64_CPUDATA_H
 #define _SPARC64_CPUDATA_H
 
+#include <asm/hypervisor.h>
+
 #ifndef __ASSEMBLY__
 
 #include <linux/percpu.h>
@@ -57,6 +59,9 @@ struct trap_per_cpu {
 
 /* D-cache line 2 */
 	unsigned long		__pad2[4];
+
+/* Dcache lines 3 and 4 */
+	struct hv_fault_status	fault_info;
 } __attribute__((aligned(64)));
 extern struct trap_per_cpu trap_block[NR_CPUS];
 extern void init_cur_cpu_trap(void);
@@ -88,8 +93,9 @@ extern struct gl_2insn_patch_entry __gl_2insn_patch, __gl_2insn_patch_end;
 
 #define TRAP_PER_CPU_THREAD	0x00
 #define TRAP_PER_CPU_PGD_PADDR	0x08
+#define TRAP_PER_CPU_FAULT_INFO	0x20
 
-#define TRAP_BLOCK_SZ_SHIFT	6
+#define TRAP_BLOCK_SZ_SHIFT	7
 
 #include <asm/scratchpad.h>
 
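[Annotation] TRAP_BLOCK_SZ_SHIFT is bumped from 6 to 7 because fault_info grows each struct trap_per_cpu beyond 64 bytes, and the TLB miss handlers index trap_block with a shift rather than a multiply, so the entry size must remain a power of two. In C, the address computation the sethi/or/sllx/add sequences perform is roughly:

	/* Sketch of the &trap_block[cpu] computation done in the miss
	 * handlers; correct only while sizeof(struct trap_per_cpu) is
	 * exactly (1UL << TRAP_BLOCK_SZ_SHIFT) bytes.
	 */
	static struct trap_per_cpu *my_trap_block(unsigned long cpu)
	{
		unsigned long base = (unsigned long)&trap_block[0];

		return (struct trap_per_cpu *)
			(base + (cpu << TRAP_BLOCK_SZ_SHIFT));
	}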
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index 44709cde5617..7f3abc32c4dd 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -53,6 +53,14 @@
  * kernel image, so we don't play these games for swapper_tsb access.
  */
 #ifndef __ASSEMBLY__
+struct tsb_ldquad_phys_patch_entry {
+	unsigned int	addr;
+	unsigned int	sun4u_insn;
+	unsigned int	sun4v_insn;
+};
+extern struct tsb_ldquad_phys_patch_entry __tsb_ldquad_phys_patch,
+	__tsb_ldquad_phys_patch_end;
+
 struct tsb_phys_patch_entry {
 	unsigned int	addr;
 	unsigned int	insn;
@@ -61,9 +69,10 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
 #endif
 #define TSB_LOAD_QUAD(TSB, REG)	\
 661:	ldda		[TSB] ASI_NUCLEUS_QUAD_LDD, REG; \
-	.section	.tsb_phys_patch, "ax"; \
+	.section	.tsb_ldquad_phys_patch, "ax"; \
 	.word		661b; \
 	ldda		[TSB] ASI_QUAD_LDD_PHYS, REG; \
+	ldda		[TSB] ASI_QUAD_LDD_PHYS_4V, REG; \
 	.previous
 
 #define TSB_LOAD_TAG_HIGH(TSB, REG) \