author		Scott Wood <scottwood@freescale.com>	2011-12-20 10:34:34 -0500
committer	Avi Kivity <avi@redhat.com>	2012-04-08 05:51:12 -0400
commit		8fdd21a26876ea6c486c38bfa75fdd18ba299351 (patch)
tree		01ba127004f254e42b00760d6a540ba0db464f5b /arch/powerpc/kvm
parent		52e1718c6fd1a1f54c676c2107dc931e93865fe8 (diff)
KVM: PPC: e500: refactor core-specific TLB code
The PID handling is e500v1/v2-specific, and is moved to e500.c.
The MMU sregs code and kvmppc_core_vcpu_translate will be shared with
e500mc, and are moved from e500.c to e500_tlb.c.
Partially based on patches from Liu Yu <yu.liu@freescale.com>.
Signed-off-by: Scott Wood <scottwood@freescale.com>
[agraf: fix bisectability]
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
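
To see the shape of the refactor at a glance, here is a small standalone sketch (plain C with stub types, not kernel code) of the control flow this patch creates: e500_tlb.c keeps the core-agnostic guest-TLB bookkeeping and invalidates shadow state through two per-core hooks, which e500.c implements for e500v1/v2 on top of its shadow-ID table. The hook names mirror the patch; struct gtlb_entry and everything else here is a simplified assumption.

#include <stdio.h>

struct gtlb_entry { unsigned int eaddr; };	/* stub for the real tlbe */

/* Per-core hooks; on e500v1/v2, e500.c backs these with the sid table. */
static void kvmppc_e500_tlbil_one(struct gtlb_entry *gtlbe)
{
	printf("invalidate the shadow entry for eaddr 0x%x\n", gtlbe->eaddr);
}

static void kvmppc_e500_tlbil_all(void)
{
	printf("invalidate all shadow mappings for this vcpu\n");
}

/* Core-agnostic side, mirroring inval_gtlbe_on_host() after the patch. */
static void inval_gtlbe_on_host(int tlbsel, struct gtlb_entry *gtlbe)
{
	if (tlbsel == 1) {
		/* A TLB1 entry may be backed by many host entries. */
		kvmppc_e500_tlbil_all();
		return;
	}
	/* A TLB0 entry is backed by at most one host tlbe per shadow pid. */
	kvmppc_e500_tlbil_one(gtlbe);
}

int main(void)
{
	struct gtlb_entry e = { 0x10000000 };

	inval_gtlbe_on_host(0, &e);	/* TLB0: targeted invalidation */
	inval_gtlbe_on_host(1, &e);	/* TLB1: flush everything */
	return 0;
}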
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--	arch/powerpc/kvm/e500.c	357
-rw-r--r--	arch/powerpc/kvm/e500.h	62
-rw-r--r--	arch/powerpc/kvm/e500_emulate.c	6
-rw-r--r--	arch/powerpc/kvm/e500_tlb.c	460
4 files changed, 471 insertions(+), 414 deletions(-)
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 76b35d8f099f..b479ed77c515 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -22,9 +22,281 @@ | |||
22 | #include <asm/tlbflush.h> | 22 | #include <asm/tlbflush.h> |
23 | #include <asm/kvm_ppc.h> | 23 | #include <asm/kvm_ppc.h> |
24 | 24 | ||
25 | #include "../mm/mmu_decl.h" | ||
25 | #include "booke.h" | 26 | #include "booke.h" |
26 | #include "e500.h" | 27 | #include "e500.h" |
27 | 28 | ||
29 | struct id { | ||
30 | unsigned long val; | ||
31 | struct id **pentry; | ||
32 | }; | ||
33 | |||
34 | #define NUM_TIDS 256 | ||
35 | |||
36 | /* | ||
37 | * This table provides mappings from: | ||
38 | * (guestAS,guestTID,guestPR) --> ID of physical cpu | ||
39 | * guestAS [0..1] | ||
40 | * guestTID [0..255] | ||
41 | * guestPR [0..1] | ||
42 | * ID [1..255] | ||
43 | * Each vcpu keeps one vcpu_id_table. | ||
44 | */ | ||
45 | struct vcpu_id_table { | ||
46 | struct id id[2][NUM_TIDS][2]; | ||
47 | }; | ||
48 | |||
49 | /* | ||
50 | * This table provides reversed mappings of vcpu_id_table: | ||
51 | * ID --> address of vcpu_id_table item. | ||
52 | * Each physical core has one pcpu_id_table. | ||
53 | */ | ||
54 | struct pcpu_id_table { | ||
55 | struct id *entry[NUM_TIDS]; | ||
56 | }; | ||
57 | |||
58 | static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids); | ||
59 | |||
60 | /* This variable keeps the last used shadow ID on the local core. | ||
61 | * The valid range of a shadow ID is [1..255]. */ | ||
62 | static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid); | ||
63 | |||
64 | /* | ||
65 | * Allocate a free shadow id and setup a valid sid mapping in given entry. | ||
66 | * A mapping is only valid when vcpu_id_table and pcpu_id_table match. | ||
67 | * | ||
68 | * The caller must have preemption disabled, and keep it that way until | ||
69 | * it has finished with the returned shadow id (either written into the | ||
70 | * TLB or arch.shadow_pid, or discarded). | ||
71 | */ | ||
72 | static inline int local_sid_setup_one(struct id *entry) | ||
73 | { | ||
74 | unsigned long sid; | ||
75 | int ret = -1; | ||
76 | |||
77 | sid = ++(__get_cpu_var(pcpu_last_used_sid)); | ||
78 | if (sid < NUM_TIDS) { | ||
79 | __get_cpu_var(pcpu_sids).entry[sid] = entry; | ||
80 | entry->val = sid; | ||
81 | entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid]; | ||
82 | ret = sid; | ||
83 | } | ||
84 | |||
85 | /* | ||
86 | * If sid == NUM_TIDS, we've run out of sids. We return -1, and | ||
87 | * the caller will invalidate everything and start over. | ||
88 | * | ||
89 | * sid > NUM_TIDS indicates a race, which we disable preemption to | ||
90 | * avoid. | ||
91 | */ | ||
92 | WARN_ON(sid > NUM_TIDS); | ||
93 | |||
94 | return ret; | ||
95 | } | ||
96 | |||
97 | /* | ||
98 | * Check if the given entry contains a valid shadow id mapping. | ||
99 | * An ID mapping is considered valid only if | ||
100 | * both vcpu and pcpu know this mapping. | ||
101 | * | ||
102 | * The caller must have preemption disabled, and keep it that way until | ||
103 | * it has finished with the returned shadow id (either written into the | ||
104 | * TLB or arch.shadow_pid, or discarded). | ||
105 | */ | ||
106 | static inline int local_sid_lookup(struct id *entry) | ||
107 | { | ||
108 | if (entry && entry->val != 0 && | ||
109 | __get_cpu_var(pcpu_sids).entry[entry->val] == entry && | ||
110 | entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val]) | ||
111 | return entry->val; | ||
112 | return -1; | ||
113 | } | ||
114 | |||
115 | /* Invalidate all id mappings on local core -- call with preempt disabled */ | ||
116 | static inline void local_sid_destroy_all(void) | ||
117 | { | ||
118 | __get_cpu_var(pcpu_last_used_sid) = 0; | ||
119 | memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids))); | ||
120 | } | ||
121 | |||
122 | static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
123 | { | ||
124 | vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL); | ||
125 | return vcpu_e500->idt; | ||
126 | } | ||
127 | |||
128 | static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
129 | { | ||
130 | kfree(vcpu_e500->idt); | ||
131 | vcpu_e500->idt = NULL; | ||
132 | } | ||
133 | |||
134 | /* Map guest pid to shadow. | ||
135 | * We use PID to keep the shadow of the current guest non-zero PID, | ||
136 | * and use PID1 to keep the shadow of guest zero PID, | ||
137 | * so that a guest tlbe with TID=0 can be accessed at any time. */ | ||
138 | static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
139 | { | ||
140 | preempt_disable(); | ||
141 | vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500, | ||
142 | get_cur_as(&vcpu_e500->vcpu), | ||
143 | get_cur_pid(&vcpu_e500->vcpu), | ||
144 | get_cur_pr(&vcpu_e500->vcpu), 1); | ||
145 | vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500, | ||
146 | get_cur_as(&vcpu_e500->vcpu), 0, | ||
147 | get_cur_pr(&vcpu_e500->vcpu), 1); | ||
148 | preempt_enable(); | ||
149 | } | ||
150 | |||
151 | /* Invalidate all mappings on vcpu */ | ||
152 | static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
153 | { | ||
154 | memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table)); | ||
155 | |||
156 | /* Update shadow pid when mappings are changed */ | ||
157 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
158 | } | ||
159 | |||
160 | /* Invalidate one ID mapping on vcpu */ | ||
161 | static inline void kvmppc_e500_id_table_reset_one( | ||
162 | struct kvmppc_vcpu_e500 *vcpu_e500, | ||
163 | int as, int pid, int pr) | ||
164 | { | ||
165 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
166 | |||
167 | BUG_ON(as >= 2); | ||
168 | BUG_ON(pid >= NUM_TIDS); | ||
169 | BUG_ON(pr >= 2); | ||
170 | |||
171 | idt->id[as][pid][pr].val = 0; | ||
172 | idt->id[as][pid][pr].pentry = NULL; | ||
173 | |||
174 | /* Update shadow pid when mappings are changed */ | ||
175 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * Map guest (vcpu,AS,ID,PR) to physical core shadow id. | ||
180 | * This function first looks up whether a valid mapping exists; | ||
181 | * if not, it creates a new one. | ||
182 | * | ||
183 | * The caller must have preemption disabled, and keep it that way until | ||
184 | * it has finished with the returned shadow id (either written into the | ||
185 | * TLB or arch.shadow_pid, or discarded). | ||
186 | */ | ||
187 | unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
188 | unsigned int as, unsigned int gid, | ||
189 | unsigned int pr, int avoid_recursion) | ||
190 | { | ||
191 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
192 | int sid; | ||
193 | |||
194 | BUG_ON(as >= 2); | ||
195 | BUG_ON(gid >= NUM_TIDS); | ||
196 | BUG_ON(pr >= 2); | ||
197 | |||
198 | sid = local_sid_lookup(&idt->id[as][gid][pr]); | ||
199 | |||
200 | while (sid <= 0) { | ||
201 | /* No mapping yet */ | ||
202 | sid = local_sid_setup_one(&idt->id[as][gid][pr]); | ||
203 | if (sid <= 0) { | ||
204 | _tlbil_all(); | ||
205 | local_sid_destroy_all(); | ||
206 | } | ||
207 | |||
208 | /* Update shadow pid when mappings are changed */ | ||
209 | if (!avoid_recursion) | ||
210 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
211 | } | ||
212 | |||
213 | return sid; | ||
214 | } | ||
215 | |||
216 | unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu, | ||
217 | struct kvm_book3e_206_tlb_entry *gtlbe) | ||
218 | { | ||
219 | return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe), | ||
220 | get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0); | ||
221 | } | ||
222 | |||
223 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid) | ||
224 | { | ||
225 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
226 | |||
227 | if (vcpu->arch.pid != pid) { | ||
228 | vcpu_e500->pid[0] = vcpu->arch.pid = pid; | ||
229 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
230 | } | ||
231 | } | ||
232 | |||
233 | /* gtlbe must not be mapped by more than one host tlbe */ | ||
234 | void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
235 | struct kvm_book3e_206_tlb_entry *gtlbe) | ||
236 | { | ||
237 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
238 | unsigned int pr, tid, ts, pid; | ||
239 | u32 val, eaddr; | ||
240 | unsigned long flags; | ||
241 | |||
242 | ts = get_tlb_ts(gtlbe); | ||
243 | tid = get_tlb_tid(gtlbe); | ||
244 | |||
245 | preempt_disable(); | ||
246 | |||
247 | /* One guest ID may be mapped to two shadow IDs */ | ||
248 | for (pr = 0; pr < 2; pr++) { | ||
249 | /* | ||
250 | * The shadow PID can have a valid mapping on at most one | ||
251 | * host CPU. In the common case, it will be valid on this | ||
252 | * CPU, in which case we do a local invalidation of the | ||
253 | * specific address. | ||
254 | * | ||
255 | * If the shadow PID is not valid on the current host CPU, | ||
256 | * we invalidate the entire shadow PID. | ||
257 | */ | ||
258 | pid = local_sid_lookup(&idt->id[ts][tid][pr]); | ||
259 | if (pid <= 0) { | ||
260 | kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr); | ||
261 | continue; | ||
262 | } | ||
263 | |||
264 | /* | ||
265 | * The guest is invalidating a 4K entry which is in a PID | ||
266 | * that has a valid shadow mapping on this host CPU. We | ||
267 | * search the host TLB to invalidate its shadow TLB entry, | ||
268 | * similar to __tlbil_va except that we need to look in AS1. | ||
269 | */ | ||
270 | val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS; | ||
271 | eaddr = get_tlb_eaddr(gtlbe); | ||
272 | |||
273 | local_irq_save(flags); | ||
274 | |||
275 | mtspr(SPRN_MAS6, val); | ||
276 | asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr)); | ||
277 | val = mfspr(SPRN_MAS1); | ||
278 | if (val & MAS1_VALID) { | ||
279 | mtspr(SPRN_MAS1, val & ~MAS1_VALID); | ||
280 | asm volatile("tlbwe"); | ||
281 | } | ||
282 | |||
283 | local_irq_restore(flags); | ||
284 | } | ||
285 | |||
286 | preempt_enable(); | ||
287 | } | ||
288 | |||
289 | void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
290 | { | ||
291 | kvmppc_e500_id_table_reset_all(vcpu_e500); | ||
292 | } | ||
293 | |||
294 | void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) | ||
295 | { | ||
296 | /* Recalc shadow pid since MSR changes */ | ||
297 | kvmppc_e500_recalc_shadow_pid(to_e500(vcpu)); | ||
298 | } | ||
299 | |||
28 | void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) | 300 | void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) |
29 | { | 301 | { |
30 | } | 302 | } |
@@ -36,13 +308,13 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu) | |||
36 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 308 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
37 | { | 309 | { |
38 | kvmppc_booke_vcpu_load(vcpu, cpu); | 310 | kvmppc_booke_vcpu_load(vcpu, cpu); |
39 | kvmppc_e500_tlb_load(vcpu, cpu); | 311 | |
312 | /* Shadow PID may be expired on local core */ | ||
313 | kvmppc_e500_recalc_shadow_pid(to_e500(vcpu)); | ||
40 | } | 314 | } |
41 | 315 | ||
42 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | 316 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) |
43 | { | 317 | { |
44 | kvmppc_e500_tlb_put(vcpu); | ||
45 | |||
46 | #ifdef CONFIG_SPE | 318 | #ifdef CONFIG_SPE |
47 | if (vcpu->arch.shadow_msr & MSR_SPE) | 319 | if (vcpu->arch.shadow_msr & MSR_SPE) |
48 | kvmppc_vcpu_disable_spe(vcpu); | 320 | kvmppc_vcpu_disable_spe(vcpu); |
@@ -63,6 +335,23 @@ int kvmppc_core_check_processor_compat(void) | |||
63 | return r; | 335 | return r; |
64 | } | 336 | } |
65 | 337 | ||
338 | static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
339 | { | ||
340 | struct kvm_book3e_206_tlb_entry *tlbe; | ||
341 | |||
342 | /* Insert large initial mapping for guest. */ | ||
343 | tlbe = get_entry(vcpu_e500, 1, 0); | ||
344 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M); | ||
345 | tlbe->mas2 = 0; | ||
346 | tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK; | ||
347 | |||
348 | /* 4K map for serial output. Used by kernel wrapper. */ | ||
349 | tlbe = get_entry(vcpu_e500, 1, 1); | ||
350 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K); | ||
351 | tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G; | ||
352 | tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK; | ||
353 | } | ||
354 | |||
66 | int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | 355 | int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) |
67 | { | 356 | { |
68 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 357 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
@@ -78,32 +367,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | |||
78 | return 0; | 367 | return 0; |
79 | } | 368 | } |
80 | 369 | ||
81 | /* 'linear_address' is actually an encoding of AS|PID|EADDR . */ | ||
82 | int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, | ||
83 | struct kvm_translation *tr) | ||
84 | { | ||
85 | int index; | ||
86 | gva_t eaddr; | ||
87 | u8 pid; | ||
88 | u8 as; | ||
89 | |||
90 | eaddr = tr->linear_address; | ||
91 | pid = (tr->linear_address >> 32) & 0xff; | ||
92 | as = (tr->linear_address >> 40) & 0x1; | ||
93 | |||
94 | index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as); | ||
95 | if (index < 0) { | ||
96 | tr->valid = 0; | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr); | ||
101 | /* XXX what does "writeable" and "usermode" even mean? */ | ||
102 | tr->valid = 1; | ||
103 | |||
104 | return 0; | ||
105 | } | ||
106 | |||
107 | void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 370 | void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) |
108 | { | 371 | { |
109 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 372 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
@@ -117,19 +380,6 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
117 | sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0; | 380 | sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0; |
118 | sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar; | 381 | sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar; |
119 | 382 | ||
120 | sregs->u.e.mas0 = vcpu->arch.shared->mas0; | ||
121 | sregs->u.e.mas1 = vcpu->arch.shared->mas1; | ||
122 | sregs->u.e.mas2 = vcpu->arch.shared->mas2; | ||
123 | sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3; | ||
124 | sregs->u.e.mas4 = vcpu->arch.shared->mas4; | ||
125 | sregs->u.e.mas6 = vcpu->arch.shared->mas6; | ||
126 | |||
127 | sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG); | ||
128 | sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg; | ||
129 | sregs->u.e.tlbcfg[1] = vcpu_e500->tlb1cfg; | ||
130 | sregs->u.e.tlbcfg[2] = 0; | ||
131 | sregs->u.e.tlbcfg[3] = 0; | ||
132 | |||
133 | sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; | 383 | sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; |
134 | sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; | 384 | sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; |
135 | sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; | 385 | sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; |
@@ -137,11 +387,13 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
137 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; | 387 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; |
138 | 388 | ||
139 | kvmppc_get_sregs_ivor(vcpu, sregs); | 389 | kvmppc_get_sregs_ivor(vcpu, sregs); |
390 | kvmppc_get_sregs_e500_tlb(vcpu, sregs); | ||
140 | } | 391 | } |
141 | 392 | ||
142 | int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 393 | int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) |
143 | { | 394 | { |
144 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 395 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
396 | int ret; | ||
145 | 397 | ||
146 | if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { | 398 | if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { |
147 | vcpu_e500->svr = sregs->u.e.impl.fsl.svr; | 399 | vcpu_e500->svr = sregs->u.e.impl.fsl.svr; |
@@ -149,14 +401,9 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |||
149 | vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar; | 401 | vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar; |
150 | } | 402 | } |
151 | 403 | ||
152 | if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) { | 404 | ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs); |
153 | vcpu->arch.shared->mas0 = sregs->u.e.mas0; | 405 | if (ret < 0) |
154 | vcpu->arch.shared->mas1 = sregs->u.e.mas1; | 406 | return ret; |
155 | vcpu->arch.shared->mas2 = sregs->u.e.mas2; | ||
156 | vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3; | ||
157 | vcpu->arch.shared->mas4 = sregs->u.e.mas4; | ||
158 | vcpu->arch.shared->mas6 = sregs->u.e.mas6; | ||
159 | } | ||
160 | 407 | ||
161 | if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) | 408 | if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) |
162 | return 0; | 409 | return 0; |
@@ -195,9 +442,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
195 | if (err) | 442 | if (err) |
196 | goto free_vcpu; | 443 | goto free_vcpu; |
197 | 444 | ||
445 | if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) | ||
446 | goto uninit_vcpu; | ||
447 | |||
198 | err = kvmppc_e500_tlb_init(vcpu_e500); | 448 | err = kvmppc_e500_tlb_init(vcpu_e500); |
199 | if (err) | 449 | if (err) |
200 | goto uninit_vcpu; | 450 | goto uninit_id; |
201 | 451 | ||
202 | vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); | 452 | vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); |
203 | if (!vcpu->arch.shared) | 453 | if (!vcpu->arch.shared) |
@@ -207,6 +457,8 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
207 | 457 | ||
208 | uninit_tlb: | 458 | uninit_tlb: |
209 | kvmppc_e500_tlb_uninit(vcpu_e500); | 459 | kvmppc_e500_tlb_uninit(vcpu_e500); |
460 | uninit_id: | ||
461 | kvmppc_e500_id_table_free(vcpu_e500); | ||
210 | uninit_vcpu: | 462 | uninit_vcpu: |
211 | kvm_vcpu_uninit(vcpu); | 463 | kvm_vcpu_uninit(vcpu); |
212 | free_vcpu: | 464 | free_vcpu: |
@@ -220,8 +472,9 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |||
220 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 472 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
221 | 473 | ||
222 | free_page((unsigned long)vcpu->arch.shared); | 474 | free_page((unsigned long)vcpu->arch.shared); |
223 | kvm_vcpu_uninit(vcpu); | ||
224 | kvmppc_e500_tlb_uninit(vcpu_e500); | 475 | kvmppc_e500_tlb_uninit(vcpu_e500); |
476 | kvmppc_e500_id_table_free(vcpu_e500); | ||
477 | kvm_vcpu_uninit(vcpu); | ||
225 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); | 478 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); |
226 | } | 479 | } |
227 | 480 | ||
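
The shadow-ID machinery moved above relies on a two-way handshake: an ID mapping counts as valid only while the per-vcpu struct id and the per-cpu reverse table still point at each other, so wiping the per-cpu side (local_sid_destroy_all) silently invalidates every vcpu's entries without touching them. Below is a standalone model of that check, under the simplifying assumptions of a single CPU and no preemption; the per-CPU variables are replaced by plain statics.

#include <stdio.h>
#include <string.h>

#define NUM_TIDS 256

struct id {
	unsigned long val;
	struct id **pentry;
};

static struct id *pcpu_entry[NUM_TIDS];	/* stands in for pcpu_sids */
static unsigned long last_used_sid;	/* stands in for pcpu_last_used_sid */

static int local_sid_setup_one(struct id *entry)
{
	unsigned long sid = ++last_used_sid;

	if (sid >= NUM_TIDS)
		return -1;	/* out of sids: caller flushes and retries */
	pcpu_entry[sid] = entry;
	entry->val = sid;
	entry->pentry = &pcpu_entry[sid];
	return sid;
}

static int local_sid_lookup(struct id *entry)
{
	/* Valid only if both sides of the mapping still agree. */
	if (entry->val != 0 &&
	    pcpu_entry[entry->val] == entry &&
	    entry->pentry == &pcpu_entry[entry->val])
		return entry->val;
	return -1;
}

static void local_sid_destroy_all(void)
{
	last_used_sid = 0;
	memset(pcpu_entry, 0, sizeof(pcpu_entry));
}

int main(void)
{
	struct id e = { 0, NULL };

	printf("setup: sid %d\n", local_sid_setup_one(&e));
	printf("lookup: %d\n", local_sid_lookup(&e));	   /* valid */
	local_sid_destroy_all();			   /* core flushed */
	printf("after flush: %d\n", local_sid_lookup(&e)); /* -1: stale */
	return 0;
}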
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index a48af005c223..34cef08f1361 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -35,7 +35,9 @@ struct tlbe_priv { | |||
35 | struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */ | 35 | struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */ |
36 | }; | 36 | }; |
37 | 37 | ||
38 | #ifdef CONFIG_KVM_E500 | ||
38 | struct vcpu_id_table; | 39 | struct vcpu_id_table; |
40 | #endif | ||
39 | 41 | ||
40 | struct kvmppc_e500_tlb_params { | 42 | struct kvmppc_e500_tlb_params { |
41 | int entries, ways, sets; | 43 | int entries, ways, sets; |
@@ -70,23 +72,22 @@ struct kvmppc_vcpu_e500 { | |||
70 | struct tlbe_ref *tlb_refs[E500_TLB_NUM]; | 72 | struct tlbe_ref *tlb_refs[E500_TLB_NUM]; |
71 | unsigned int host_tlb1_nv; | 73 | unsigned int host_tlb1_nv; |
72 | 74 | ||
73 | u32 host_pid[E500_PID_NUM]; | ||
74 | u32 pid[E500_PID_NUM]; | ||
75 | u32 svr; | 75 | u32 svr; |
76 | |||
77 | /* vcpu id table */ | ||
78 | struct vcpu_id_table *idt; | ||
79 | |||
80 | u32 l1csr0; | 76 | u32 l1csr0; |
81 | u32 l1csr1; | 77 | u32 l1csr1; |
82 | u32 hid0; | 78 | u32 hid0; |
83 | u32 hid1; | 79 | u32 hid1; |
84 | u32 tlb0cfg; | ||
85 | u32 tlb1cfg; | ||
86 | u64 mcar; | 80 | u64 mcar; |
87 | 81 | ||
88 | struct page **shared_tlb_pages; | 82 | struct page **shared_tlb_pages; |
89 | int num_shared_tlb_pages; | 83 | int num_shared_tlb_pages; |
84 | |||
85 | #ifdef CONFIG_KVM_E500 | ||
86 | u32 pid[E500_PID_NUM]; | ||
87 | |||
88 | /* vcpu id table */ | ||
89 | struct vcpu_id_table *idt; | ||
90 | #endif | ||
90 | }; | 91 | }; |
91 | 92 | ||
92 | static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu) | 93 | static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu) |
@@ -113,23 +114,25 @@ static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu) | |||
113 | (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \ | 114 | (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \ |
114 | | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK) | 115 | | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK) |
115 | 116 | ||
116 | extern void kvmppc_e500_tlb_put(struct kvm_vcpu *); | ||
117 | extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int); | ||
118 | extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *); | ||
119 | extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *); | ||
120 | int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, | 117 | int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, |
121 | ulong value); | 118 | ulong value); |
122 | int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu); | 119 | int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu); |
123 | int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu); | 120 | int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu); |
124 | int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb); | 121 | int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb); |
125 | int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb); | 122 | int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb); |
126 | int kvmppc_e500_tlb_search(struct kvm_vcpu *, gva_t, unsigned int, int); | ||
127 | int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500); | 123 | int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500); |
128 | void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500); | 124 | void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500); |
129 | 125 | ||
130 | void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); | 126 | void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); |
131 | int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); | 127 | int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); |
132 | 128 | ||
129 | |||
130 | #ifdef CONFIG_KVM_E500 | ||
131 | unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
132 | unsigned int as, unsigned int gid, | ||
133 | unsigned int pr, int avoid_recursion); | ||
134 | #endif | ||
135 | |||
133 | /* TLB helper functions */ | 136 | /* TLB helper functions */ |
134 | static inline unsigned int | 137 | static inline unsigned int |
135 | get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe) | 138 | get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe) |
@@ -183,6 +186,12 @@ get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe) | |||
183 | return (tlbe->mas1 >> 30) & 0x1; | 186 | return (tlbe->mas1 >> 30) & 0x1; |
184 | } | 187 | } |
185 | 188 | ||
189 | static inline unsigned int | ||
190 | get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe) | ||
191 | { | ||
192 | return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; | ||
193 | } | ||
194 | |||
186 | static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu) | 195 | static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu) |
187 | { | 196 | { |
188 | return vcpu->arch.pid & 0xff; | 197 | return vcpu->arch.pid & 0xff; |
@@ -248,4 +257,31 @@ static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, | |||
248 | return 1; | 257 | return 1; |
249 | } | 258 | } |
250 | 259 | ||
260 | static inline struct kvm_book3e_206_tlb_entry *get_entry( | ||
261 | struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry) | ||
262 | { | ||
263 | int offset = vcpu_e500->gtlb_offset[tlbsel]; | ||
264 | return &vcpu_e500->gtlb_arch[offset + entry]; | ||
265 | } | ||
266 | |||
267 | void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
268 | struct kvm_book3e_206_tlb_entry *gtlbe); | ||
269 | void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500); | ||
270 | |||
271 | #ifdef CONFIG_KVM_E500 | ||
272 | unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu, | ||
273 | struct kvm_book3e_206_tlb_entry *gtlbe); | ||
274 | |||
275 | static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu) | ||
276 | { | ||
277 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
278 | unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf; | ||
279 | |||
280 | return vcpu_e500->pid[tidseld]; | ||
281 | } | ||
282 | |||
283 | /* Force TS=1 for all guest mappings. */ | ||
284 | #define get_tlb_sts(gtlbe) (MAS1_TS) | ||
285 | #endif /* CONFIG_KVM_E500 */ | ||
286 | |||
251 | #endif /* KVM_E500_H */ | 287 | #endif /* KVM_E500_H */ |
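
Among the helpers this header gains, get_tlbmiss_tid() decodes the TIDSELD field of MAS4 (bits 16..19) to pick which guest PID register supplies the TID placed in MAS1 on a TLB miss. A minimal sketch of that decode follows, with a plain array standing in for vcpu_e500->pid[]; the demo only passes selector values that are valid for e500v2's three PID registers, and the sample PID values are arbitrary.

#include <stdio.h>

#define E500_PID_NUM 3	/* PID0..PID2 on e500v2 */

static unsigned int get_tlbmiss_tid(unsigned int mas4,
				    const unsigned int pid[E500_PID_NUM])
{
	unsigned int tidseld = (mas4 >> 16) & 0xf;

	return pid[tidseld];	/* demo assumes a valid selector */
}

int main(void)
{
	unsigned int pid[E500_PID_NUM] = { 0x2a, 0x00, 0x17 };

	/* TIDSELD = 0 selects PID0; TIDSELD = 2 selects PID2. */
	printf("tid = 0x%02x\n", get_tlbmiss_tid(0u << 16, pid));
	printf("tid = 0x%02x\n", get_tlbmiss_tid(2u << 16, pid));
	return 0;
}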
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 7e2d592bf562..c80794d097d3 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -174,9 +174,9 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
174 | kvmppc_set_gpr(vcpu, rt, val); | 174 | kvmppc_set_gpr(vcpu, rt, val); |
175 | break; | 175 | break; |
176 | case SPRN_TLB0CFG: | 176 | case SPRN_TLB0CFG: |
177 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break; | 177 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.tlbcfg[0]); break; |
178 | case SPRN_TLB1CFG: | 178 | case SPRN_TLB1CFG: |
179 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break; | 179 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.tlbcfg[1]); break; |
180 | case SPRN_L1CSR0: | 180 | case SPRN_L1CSR0: |
181 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break; | 181 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break; |
182 | case SPRN_L1CSR1: | 182 | case SPRN_L1CSR1: |
@@ -192,7 +192,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
192 | kvmppc_set_gpr(vcpu, rt, 0); break; | 192 | kvmppc_set_gpr(vcpu, rt, 0); break; |
193 | 193 | ||
194 | case SPRN_MMUCFG: | 194 | case SPRN_MMUCFG: |
195 | kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break; | 195 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucfg); break; |
196 | 196 | ||
197 | /* extra exceptions */ | 197 | /* extra exceptions */ |
198 | case SPRN_IVOR32: | 198 | case SPRN_IVOR32: |
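
The emulation change above works because the guest-visible MMUCFG/TLBnCFG values are now cached in vcpu->arch at init/config time instead of being read from host SPRs on every mfspr. The sketch below shows how such a TLBnCFG value is composed (host reserved bits kept, entry count and associativity replaced with the guest TLB geometry); the mask and shift values follow the kernel's Book3E definitions but should be treated as assumptions here, as should the sample host value.

#include <stdio.h>

#define TLBnCFG_N_ENTRY		0x00000fff	/* entry count, low 12 bits */
#define TLBnCFG_ASSOC		0xff000000	/* associativity, bits 24..31 */
#define TLBnCFG_ASSOC_SHIFT	24

static unsigned int guest_tlbcfg(unsigned int host_cfg,
				 unsigned int entries, unsigned int ways)
{
	unsigned int cfg = host_cfg & ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);

	cfg |= entries;				/* guest entry count */
	cfg |= ways << TLBnCFG_ASSOC_SHIFT;	/* guest associativity */
	return cfg;
}

int main(void)
{
	/* e.g. a 512-entry, 4-way guest TLB0; host value is arbitrary */
	printf("tlbcfg[0] = 0x%08x\n", guest_tlbcfg(0x04110200, 512, 4));
	return 0;
}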
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 7d4a918a0ab0..9925fc6c9cfb 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -27,208 +27,14 @@ | |||
27 | #include <linux/hugetlb.h> | 27 | #include <linux/hugetlb.h> |
28 | #include <asm/kvm_ppc.h> | 28 | #include <asm/kvm_ppc.h> |
29 | 29 | ||
30 | #include "../mm/mmu_decl.h" | ||
31 | #include "e500.h" | 30 | #include "e500.h" |
32 | #include "trace.h" | 31 | #include "trace.h" |
33 | #include "timing.h" | 32 | #include "timing.h" |
34 | 33 | ||
35 | #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) | 34 | #define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) |
36 | 35 | ||
37 | struct id { | ||
38 | unsigned long val; | ||
39 | struct id **pentry; | ||
40 | }; | ||
41 | |||
42 | #define NUM_TIDS 256 | ||
43 | |||
44 | /* | ||
45 | * This table provide mappings from: | ||
46 | * (guestAS,guestTID,guestPR) --> ID of physical cpu | ||
47 | * guestAS [0..1] | ||
48 | * guestTID [0..255] | ||
49 | * guestPR [0..1] | ||
50 | * ID [1..255] | ||
51 | * Each vcpu keeps one vcpu_id_table. | ||
52 | */ | ||
53 | struct vcpu_id_table { | ||
54 | struct id id[2][NUM_TIDS][2]; | ||
55 | }; | ||
56 | |||
57 | /* | ||
58 | * This table provide reversed mappings of vcpu_id_table: | ||
59 | * ID --> address of vcpu_id_table item. | ||
60 | * Each physical core has one pcpu_id_table. | ||
61 | */ | ||
62 | struct pcpu_id_table { | ||
63 | struct id *entry[NUM_TIDS]; | ||
64 | }; | ||
65 | |||
66 | static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids); | ||
67 | |||
68 | /* This variable keeps last used shadow ID on local core. | ||
69 | * The valid range of shadow ID is [1..255] */ | ||
70 | static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid); | ||
71 | |||
72 | static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; | 36 | static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; |
73 | 37 | ||
74 | static struct kvm_book3e_206_tlb_entry *get_entry( | ||
75 | struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry) | ||
76 | { | ||
77 | int offset = vcpu_e500->gtlb_offset[tlbsel]; | ||
78 | return &vcpu_e500->gtlb_arch[offset + entry]; | ||
79 | } | ||
80 | |||
81 | /* | ||
82 | * Allocate a free shadow id and setup a valid sid mapping in given entry. | ||
83 | * A mapping is only valid when vcpu_id_table and pcpu_id_table are match. | ||
84 | * | ||
85 | * The caller must have preemption disabled, and keep it that way until | ||
86 | * it has finished with the returned shadow id (either written into the | ||
87 | * TLB or arch.shadow_pid, or discarded). | ||
88 | */ | ||
89 | static inline int local_sid_setup_one(struct id *entry) | ||
90 | { | ||
91 | unsigned long sid; | ||
92 | int ret = -1; | ||
93 | |||
94 | sid = ++(__get_cpu_var(pcpu_last_used_sid)); | ||
95 | if (sid < NUM_TIDS) { | ||
96 | __get_cpu_var(pcpu_sids).entry[sid] = entry; | ||
97 | entry->val = sid; | ||
98 | entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid]; | ||
99 | ret = sid; | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * If sid == NUM_TIDS, we've run out of sids. We return -1, and | ||
104 | * the caller will invalidate everything and start over. | ||
105 | * | ||
106 | * sid > NUM_TIDS indicates a race, which we disable preemption to | ||
107 | * avoid. | ||
108 | */ | ||
109 | WARN_ON(sid > NUM_TIDS); | ||
110 | |||
111 | return ret; | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * Check if given entry contain a valid shadow id mapping. | ||
116 | * An ID mapping is considered valid only if | ||
117 | * both vcpu and pcpu know this mapping. | ||
118 | * | ||
119 | * The caller must have preemption disabled, and keep it that way until | ||
120 | * it has finished with the returned shadow id (either written into the | ||
121 | * TLB or arch.shadow_pid, or discarded). | ||
122 | */ | ||
123 | static inline int local_sid_lookup(struct id *entry) | ||
124 | { | ||
125 | if (entry && entry->val != 0 && | ||
126 | __get_cpu_var(pcpu_sids).entry[entry->val] == entry && | ||
127 | entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val]) | ||
128 | return entry->val; | ||
129 | return -1; | ||
130 | } | ||
131 | |||
132 | /* Invalidate all id mappings on local core -- call with preempt disabled */ | ||
133 | static inline void local_sid_destroy_all(void) | ||
134 | { | ||
135 | __get_cpu_var(pcpu_last_used_sid) = 0; | ||
136 | memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids))); | ||
137 | } | ||
138 | |||
139 | static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
140 | { | ||
141 | vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL); | ||
142 | return vcpu_e500->idt; | ||
143 | } | ||
144 | |||
145 | static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
146 | { | ||
147 | kfree(vcpu_e500->idt); | ||
148 | } | ||
149 | |||
150 | /* Invalidate all mappings on vcpu */ | ||
151 | static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
152 | { | ||
153 | memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table)); | ||
154 | |||
155 | /* Update shadow pid when mappings are changed */ | ||
156 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
157 | } | ||
158 | |||
159 | /* Invalidate one ID mapping on vcpu */ | ||
160 | static inline void kvmppc_e500_id_table_reset_one( | ||
161 | struct kvmppc_vcpu_e500 *vcpu_e500, | ||
162 | int as, int pid, int pr) | ||
163 | { | ||
164 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
165 | |||
166 | BUG_ON(as >= 2); | ||
167 | BUG_ON(pid >= NUM_TIDS); | ||
168 | BUG_ON(pr >= 2); | ||
169 | |||
170 | idt->id[as][pid][pr].val = 0; | ||
171 | idt->id[as][pid][pr].pentry = NULL; | ||
172 | |||
173 | /* Update shadow pid when mappings are changed */ | ||
174 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * Map guest (vcpu,AS,ID,PR) to physical core shadow id. | ||
179 | * This function first lookup if a valid mapping exists, | ||
180 | * if not, then creates a new one. | ||
181 | * | ||
182 | * The caller must have preemption disabled, and keep it that way until | ||
183 | * it has finished with the returned shadow id (either written into the | ||
184 | * TLB or arch.shadow_pid, or discarded). | ||
185 | */ | ||
186 | static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500, | ||
187 | unsigned int as, unsigned int gid, | ||
188 | unsigned int pr, int avoid_recursion) | ||
189 | { | ||
190 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
191 | int sid; | ||
192 | |||
193 | BUG_ON(as >= 2); | ||
194 | BUG_ON(gid >= NUM_TIDS); | ||
195 | BUG_ON(pr >= 2); | ||
196 | |||
197 | sid = local_sid_lookup(&idt->id[as][gid][pr]); | ||
198 | |||
199 | while (sid <= 0) { | ||
200 | /* No mapping yet */ | ||
201 | sid = local_sid_setup_one(&idt->id[as][gid][pr]); | ||
202 | if (sid <= 0) { | ||
203 | _tlbil_all(); | ||
204 | local_sid_destroy_all(); | ||
205 | } | ||
206 | |||
207 | /* Update shadow pid when mappings are changed */ | ||
208 | if (!avoid_recursion) | ||
209 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
210 | } | ||
211 | |||
212 | return sid; | ||
213 | } | ||
214 | |||
215 | /* Map guest pid to shadow. | ||
216 | * We use PID to keep shadow of current guest non-zero PID, | ||
217 | * and use PID1 to keep shadow of guest zero PID. | ||
218 | * So that guest tlbe with TID=0 can be accessed at any time */ | ||
219 | void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
220 | { | ||
221 | preempt_disable(); | ||
222 | vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500, | ||
223 | get_cur_as(&vcpu_e500->vcpu), | ||
224 | get_cur_pid(&vcpu_e500->vcpu), | ||
225 | get_cur_pr(&vcpu_e500->vcpu), 1); | ||
226 | vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500, | ||
227 | get_cur_as(&vcpu_e500->vcpu), 0, | ||
228 | get_cur_pr(&vcpu_e500->vcpu), 1); | ||
229 | preempt_enable(); | ||
230 | } | ||
231 | |||
232 | static inline unsigned int gtlb0_get_next_victim( | 38 | static inline unsigned int gtlb0_get_next_victim( |
233 | struct kvmppc_vcpu_e500 *vcpu_e500) | 39 | struct kvmppc_vcpu_e500 *vcpu_e500) |
234 | { | 40 | { |
@@ -336,6 +142,7 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
336 | } | 142 | } |
337 | } | 143 | } |
338 | 144 | ||
145 | #ifdef CONFIG_KVM_E500 | ||
339 | void kvmppc_map_magic(struct kvm_vcpu *vcpu) | 146 | void kvmppc_map_magic(struct kvm_vcpu *vcpu) |
340 | { | 147 | { |
341 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 148 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
@@ -360,75 +167,21 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu) | |||
360 | __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index)); | 167 | __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index)); |
361 | preempt_enable(); | 168 | preempt_enable(); |
362 | } | 169 | } |
363 | 170 | #endif | |
364 | void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu) | ||
365 | { | ||
366 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
367 | |||
368 | /* Shadow PID may be expired on local core */ | ||
369 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
370 | } | ||
371 | |||
372 | void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu) | ||
373 | { | ||
374 | } | ||
375 | 171 | ||
376 | static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, | 172 | static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, |
377 | int tlbsel, int esel) | 173 | int tlbsel, int esel) |
378 | { | 174 | { |
379 | struct kvm_book3e_206_tlb_entry *gtlbe = | 175 | struct kvm_book3e_206_tlb_entry *gtlbe = |
380 | get_entry(vcpu_e500, tlbsel, esel); | 176 | get_entry(vcpu_e500, tlbsel, esel); |
381 | struct vcpu_id_table *idt = vcpu_e500->idt; | ||
382 | unsigned int pr, tid, ts, pid; | ||
383 | u32 val, eaddr; | ||
384 | unsigned long flags; | ||
385 | |||
386 | ts = get_tlb_ts(gtlbe); | ||
387 | tid = get_tlb_tid(gtlbe); | ||
388 | |||
389 | preempt_disable(); | ||
390 | |||
391 | /* One guest ID may be mapped to two shadow IDs */ | ||
392 | for (pr = 0; pr < 2; pr++) { | ||
393 | /* | ||
394 | * The shadow PID can have a valid mapping on at most one | ||
395 | * host CPU. In the common case, it will be valid on this | ||
396 | * CPU, in which case (for TLB0) we do a local invalidation | ||
397 | * of the specific address. | ||
398 | * | ||
399 | * If the shadow PID is not valid on the current host CPU, or | ||
400 | * if we're invalidating a TLB1 entry, we invalidate the | ||
401 | * entire shadow PID. | ||
402 | */ | ||
403 | if (tlbsel == 1 || | ||
404 | (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) { | ||
405 | kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr); | ||
406 | continue; | ||
407 | } | ||
408 | |||
409 | /* | ||
410 | * The guest is invalidating a TLB0 entry which is in a PID | ||
411 | * that has a valid shadow mapping on this host CPU. We | ||
412 | * search host TLB0 to invalidate it's shadow TLB entry, | ||
413 | * similar to __tlbil_va except that we need to look in AS1. | ||
414 | */ | ||
415 | val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS; | ||
416 | eaddr = get_tlb_eaddr(gtlbe); | ||
417 | |||
418 | local_irq_save(flags); | ||
419 | |||
420 | mtspr(SPRN_MAS6, val); | ||
421 | asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr)); | ||
422 | val = mfspr(SPRN_MAS1); | ||
423 | if (val & MAS1_VALID) { | ||
424 | mtspr(SPRN_MAS1, val & ~MAS1_VALID); | ||
425 | asm volatile("tlbwe"); | ||
426 | } | ||
427 | 177 | ||
428 | local_irq_restore(flags); | 178 | if (tlbsel == 1) { |
179 | kvmppc_e500_tlbil_all(vcpu_e500); | ||
180 | return; | ||
429 | } | 181 | } |
430 | 182 | ||
431 | preempt_enable(); | 183 | /* Guest tlbe is backed by at most one host tlbe per shadow pid. */ |
184 | kvmppc_e500_tlbil_one(vcpu_e500, gtlbe); | ||
432 | } | 185 | } |
433 | 186 | ||
434 | static int tlb0_set_base(gva_t addr, int sets, int ways) | 187 | static int tlb0_set_base(gva_t addr, int sets, int ways) |
@@ -546,7 +299,7 @@ static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500) | |||
546 | int stlbsel = 1; | 299 | int stlbsel = 1; |
547 | int i; | 300 | int i; |
548 | 301 | ||
549 | kvmppc_e500_id_table_reset_all(vcpu_e500); | 302 | kvmppc_e500_tlbil_all(vcpu_e500); |
550 | 303 | ||
551 | for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { | 304 | for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { |
552 | struct tlbe_ref *ref = | 305 | struct tlbe_ref *ref = |
@@ -561,19 +314,18 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, | |||
561 | unsigned int eaddr, int as) | 314 | unsigned int eaddr, int as) |
562 | { | 315 | { |
563 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 316 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
564 | unsigned int victim, pidsel, tsized; | 317 | unsigned int victim, tsized; |
565 | int tlbsel; | 318 | int tlbsel; |
566 | 319 | ||
567 | /* since we only have two TLBs, only lower bit is used. */ | 320 | /* since we only have two TLBs, only lower bit is used. */ |
568 | tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1; | 321 | tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1; |
569 | victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; | 322 | victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; |
570 | pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf; | ||
571 | tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f; | 323 | tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f; |
572 | 324 | ||
573 | vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) | 325 | vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) |
574 | | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); | 326 | | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); |
575 | vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) | 327 | vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) |
576 | | MAS1_TID(vcpu_e500->pid[pidsel]) | 328 | | MAS1_TID(get_tlbmiss_tid(vcpu)) |
577 | | MAS1_TSIZE(tsized); | 329 | | MAS1_TSIZE(tsized); |
578 | vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN) | 330 | vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN) |
579 | | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK); | 331 | | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK); |
@@ -585,23 +337,22 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, | |||
585 | 337 | ||
586 | /* TID must be supplied by the caller */ | 338 | /* TID must be supplied by the caller */ |
587 | static inline void kvmppc_e500_setup_stlbe( | 339 | static inline void kvmppc_e500_setup_stlbe( |
588 | struct kvmppc_vcpu_e500 *vcpu_e500, | 340 | struct kvm_vcpu *vcpu, |
589 | struct kvm_book3e_206_tlb_entry *gtlbe, | 341 | struct kvm_book3e_206_tlb_entry *gtlbe, |
590 | int tsize, struct tlbe_ref *ref, u64 gvaddr, | 342 | int tsize, struct tlbe_ref *ref, u64 gvaddr, |
591 | struct kvm_book3e_206_tlb_entry *stlbe) | 343 | struct kvm_book3e_206_tlb_entry *stlbe) |
592 | { | 344 | { |
593 | pfn_t pfn = ref->pfn; | 345 | pfn_t pfn = ref->pfn; |
346 | u32 pr = vcpu->arch.shared->msr & MSR_PR; | ||
594 | 347 | ||
595 | BUG_ON(!(ref->flags & E500_TLB_VALID)); | 348 | BUG_ON(!(ref->flags & E500_TLB_VALID)); |
596 | 349 | ||
597 | /* Force TS=1 IPROT=0 for all guest mappings. */ | 350 | /* Force IPROT=0 for all guest mappings. */ |
598 | stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID; | 351 | stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID; |
599 | stlbe->mas2 = (gvaddr & MAS2_EPN) | 352 | stlbe->mas2 = (gvaddr & MAS2_EPN) | |
600 | | e500_shadow_mas2_attrib(gtlbe->mas2, | 353 | e500_shadow_mas2_attrib(gtlbe->mas2, pr); |
601 | vcpu_e500->vcpu.arch.shared->msr & MSR_PR); | 354 | stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | |
602 | stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) | 355 | e500_shadow_mas3_attrib(gtlbe->mas7_3, pr); |
603 | | e500_shadow_mas3_attrib(gtlbe->mas7_3, | ||
604 | vcpu_e500->vcpu.arch.shared->msr & MSR_PR); | ||
605 | } | 356 | } |
606 | 357 | ||
607 | static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | 358 | static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, |
@@ -735,7 +486,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
735 | kvmppc_e500_ref_release(ref); | 486 | kvmppc_e500_ref_release(ref); |
736 | kvmppc_e500_ref_setup(ref, gtlbe, pfn); | 487 | kvmppc_e500_ref_setup(ref, gtlbe, pfn); |
737 | 488 | ||
738 | kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, ref, gvaddr, stlbe); | 489 | kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, |
490 | ref, gvaddr, stlbe); | ||
739 | } | 491 | } |
740 | 492 | ||
741 | /* XXX only map the one-one case, for now use TLB0 */ | 493 | /* XXX only map the one-one case, for now use TLB0 */ |
@@ -775,14 +527,6 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
775 | return victim; | 527 | return victim; |
776 | } | 528 | } |
777 | 529 | ||
778 | void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) | ||
779 | { | ||
780 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
781 | |||
782 | /* Recalc shadow pid since MSR changes */ | ||
783 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
784 | } | ||
785 | |||
786 | static inline int kvmppc_e500_gtlbe_invalidate( | 530 | static inline int kvmppc_e500_gtlbe_invalidate( |
787 | struct kvmppc_vcpu_e500 *vcpu_e500, | 531 | struct kvmppc_vcpu_e500 *vcpu_e500, |
788 | int tlbsel, int esel) | 532 | int tlbsel, int esel) |
@@ -810,7 +554,7 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value) | |||
810 | kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); | 554 | kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); |
811 | 555 | ||
812 | /* Invalidate all vcpu id mappings */ | 556 | /* Invalidate all vcpu id mappings */ |
813 | kvmppc_e500_id_table_reset_all(vcpu_e500); | 557 | kvmppc_e500_tlbil_all(vcpu_e500); |
814 | 558 | ||
815 | return EMULATE_DONE; | 559 | return EMULATE_DONE; |
816 | } | 560 | } |
@@ -843,7 +587,7 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb) | |||
843 | } | 587 | } |
844 | 588 | ||
845 | /* Invalidate all vcpu id mappings */ | 589 | /* Invalidate all vcpu id mappings */ |
846 | kvmppc_e500_id_table_reset_all(vcpu_e500); | 590 | kvmppc_e500_tlbil_all(vcpu_e500); |
847 | 591 | ||
848 | return EMULATE_DONE; | 592 | return EMULATE_DONE; |
849 | } | 593 | } |
@@ -928,9 +672,7 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
928 | int stid; | 672 | int stid; |
929 | 673 | ||
930 | preempt_disable(); | 674 | preempt_disable(); |
931 | stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe), | 675 | stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe); |
932 | get_tlb_tid(gtlbe), | ||
933 | get_cur_pr(&vcpu_e500->vcpu), 0); | ||
934 | 676 | ||
935 | stlbe->mas1 |= MAS1_TID(stid); | 677 | stlbe->mas1 |= MAS1_TID(stid); |
936 | write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); | 678 | write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); |
@@ -940,8 +682,8 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
940 | int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | 682 | int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) |
941 | { | 683 | { |
942 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 684 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
943 | struct kvm_book3e_206_tlb_entry *gtlbe; | 685 | struct kvm_book3e_206_tlb_entry *gtlbe, stlbe; |
944 | int tlbsel, esel; | 686 | int tlbsel, esel, stlbsel, sesel; |
945 | 687 | ||
946 | tlbsel = get_tlb_tlbsel(vcpu); | 688 | tlbsel = get_tlb_tlbsel(vcpu); |
947 | esel = get_tlb_esel(vcpu, tlbsel); | 689 | esel = get_tlb_esel(vcpu, tlbsel); |
@@ -960,8 +702,6 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
960 | 702 | ||
961 | /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ | 703 | /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ |
962 | if (tlbe_is_host_safe(vcpu, gtlbe)) { | 704 | if (tlbe_is_host_safe(vcpu, gtlbe)) { |
963 | struct kvm_book3e_206_tlb_entry stlbe; | ||
964 | int stlbsel, sesel; | ||
965 | u64 eaddr; | 705 | u64 eaddr; |
966 | u64 raddr; | 706 | u64 raddr; |
967 | 707 | ||
@@ -988,7 +728,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
988 | * are mapped on the fly. */ | 728 | * are mapped on the fly. */ |
989 | stlbsel = 1; | 729 | stlbsel = 1; |
990 | sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, | 730 | sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, |
991 | raddr >> PAGE_SHIFT, gtlbe, &stlbe); | 731 | raddr >> PAGE_SHIFT, gtlbe, &stlbe); |
992 | break; | 732 | break; |
993 | 733 | ||
994 | default: | 734 | default: |
@@ -1002,6 +742,48 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
1002 | return EMULATE_DONE; | 742 | return EMULATE_DONE; |
1003 | } | 743 | } |
1004 | 744 | ||
745 | static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu, | ||
746 | gva_t eaddr, unsigned int pid, int as) | ||
747 | { | ||
748 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
749 | int esel, tlbsel; | ||
750 | |||
751 | for (tlbsel = 0; tlbsel < 2; tlbsel++) { | ||
752 | esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as); | ||
753 | if (esel >= 0) | ||
754 | return index_of(tlbsel, esel); | ||
755 | } | ||
756 | |||
757 | return -1; | ||
758 | } | ||
759 | |||
760 | /* 'linear_address' is actually an encoding of AS|PID|EADDR . */ | ||
761 | int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, | ||
762 | struct kvm_translation *tr) | ||
763 | { | ||
764 | int index; | ||
765 | gva_t eaddr; | ||
766 | u8 pid; | ||
767 | u8 as; | ||
768 | |||
769 | eaddr = tr->linear_address; | ||
770 | pid = (tr->linear_address >> 32) & 0xff; | ||
771 | as = (tr->linear_address >> 40) & 0x1; | ||
772 | |||
773 | index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as); | ||
774 | if (index < 0) { | ||
775 | tr->valid = 0; | ||
776 | return 0; | ||
777 | } | ||
778 | |||
779 | tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr); | ||
780 | /* XXX what does "writeable" and "usermode" even mean? */ | ||
781 | tr->valid = 1; | ||
782 | |||
783 | return 0; | ||
784 | } | ||
785 | |||
786 | |||
1005 | int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) | 787 | int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) |
1006 | { | 788 | { |
1007 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); | 789 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); |
@@ -1065,7 +847,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, | |||
1065 | sesel = 0; /* unused */ | 847 | sesel = 0; /* unused */ |
1066 | priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; | 848 | priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; |
1067 | 849 | ||
1068 | kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K, | 850 | kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K, |
1069 | &priv->ref, eaddr, &stlbe); | 851 | &priv->ref, eaddr, &stlbe); |
1070 | break; | 852 | break; |
1071 | 853 | ||
@@ -1086,48 +868,6 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, | |||
1086 | write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); | 868 | write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); |
1087 | } | 869 | } |
1088 | 870 | ||
1089 | int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu, | ||
1090 | gva_t eaddr, unsigned int pid, int as) | ||
1091 | { | ||
1092 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
1093 | int esel, tlbsel; | ||
1094 | |||
1095 | for (tlbsel = 0; tlbsel < 2; tlbsel++) { | ||
1096 | esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as); | ||
1097 | if (esel >= 0) | ||
1098 | return index_of(tlbsel, esel); | ||
1099 | } | ||
1100 | |||
1101 | return -1; | ||
1102 | } | ||
1103 | |||
1104 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid) | ||
1105 | { | ||
1106 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
1107 | |||
1108 | if (vcpu->arch.pid != pid) { | ||
1109 | vcpu_e500->pid[0] = vcpu->arch.pid = pid; | ||
1110 | kvmppc_e500_recalc_shadow_pid(vcpu_e500); | ||
1111 | } | ||
1112 | } | ||
1113 | |||
1114 | void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500) | ||
1115 | { | ||
1116 | struct kvm_book3e_206_tlb_entry *tlbe; | ||
1117 | |||
1118 | /* Insert large initial mapping for guest. */ | ||
1119 | tlbe = get_entry(vcpu_e500, 1, 0); | ||
1120 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M); | ||
1121 | tlbe->mas2 = 0; | ||
1122 | tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK; | ||
1123 | |||
1124 | /* 4K map for serial output. Used by kernel wrapper. */ | ||
1125 | tlbe = get_entry(vcpu_e500, 1, 1); | ||
1126 | tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K); | ||
1127 | tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G; | ||
1128 | tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK; | ||
1129 | } | ||
1130 | |||
1131 | static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) | 871 | static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) |
1132 | { | 872 | { |
1133 | int i; | 873 | int i; |
@@ -1154,6 +894,36 @@ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) | |||
1154 | vcpu_e500->gtlb_arch = NULL; | 894 | vcpu_e500->gtlb_arch = NULL; |
1155 | } | 895 | } |
1156 | 896 | ||
897 | void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
898 | { | ||
899 | sregs->u.e.mas0 = vcpu->arch.shared->mas0; | ||
900 | sregs->u.e.mas1 = vcpu->arch.shared->mas1; | ||
901 | sregs->u.e.mas2 = vcpu->arch.shared->mas2; | ||
902 | sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3; | ||
903 | sregs->u.e.mas4 = vcpu->arch.shared->mas4; | ||
904 | sregs->u.e.mas6 = vcpu->arch.shared->mas6; | ||
905 | |||
906 | sregs->u.e.mmucfg = vcpu->arch.mmucfg; | ||
907 | sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0]; | ||
908 | sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1]; | ||
909 | sregs->u.e.tlbcfg[2] = 0; | ||
910 | sregs->u.e.tlbcfg[3] = 0; | ||
911 | } | ||
912 | |||
913 | int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
914 | { | ||
915 | if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) { | ||
916 | vcpu->arch.shared->mas0 = sregs->u.e.mas0; | ||
917 | vcpu->arch.shared->mas1 = sregs->u.e.mas1; | ||
918 | vcpu->arch.shared->mas2 = sregs->u.e.mas2; | ||
919 | vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3; | ||
920 | vcpu->arch.shared->mas4 = sregs->u.e.mas4; | ||
921 | vcpu->arch.shared->mas6 = sregs->u.e.mas6; | ||
922 | } | ||
923 | |||
924 | return 0; | ||
925 | } | ||
926 | |||
1157 | int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, | 927 | int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, |
1158 | struct kvm_config_tlb *cfg) | 928 | struct kvm_config_tlb *cfg) |
1159 | { | 929 | { |
@@ -1237,14 +1007,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, | |||
1237 | vcpu_e500->gtlb_offset[0] = 0; | 1007 | vcpu_e500->gtlb_offset[0] = 0; |
1238 | vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0]; | 1008 | vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0]; |
1239 | 1009 | ||
1240 | vcpu_e500->tlb0cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); | 1010 | vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE; |
1011 | |||
1012 | vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); | ||
1241 | if (params.tlb_sizes[0] <= 2048) | 1013 | if (params.tlb_sizes[0] <= 2048) |
1242 | vcpu_e500->tlb0cfg |= params.tlb_sizes[0]; | 1014 | vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0]; |
1243 | vcpu_e500->tlb0cfg |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT; | 1015 | vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT; |
1244 | 1016 | ||
1245 | vcpu_e500->tlb1cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); | 1017 | vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); |
1246 | vcpu_e500->tlb1cfg |= params.tlb_sizes[1]; | 1018 | vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1]; |
1247 | vcpu_e500->tlb1cfg |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT; | 1019 | vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT; |
1248 | 1020 | ||
1249 | vcpu_e500->shared_tlb_pages = pages; | 1021 | vcpu_e500->shared_tlb_pages = pages; |
1250 | vcpu_e500->num_shared_tlb_pages = num_pages; | 1022 | vcpu_e500->num_shared_tlb_pages = num_pages; |
@@ -1280,6 +1052,7 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, | |||
1280 | 1052 | ||
1281 | int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) | 1053 | int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) |
1282 | { | 1054 | { |
1055 | struct kvm_vcpu *vcpu = &vcpu_e500->vcpu; | ||
1283 | int entry_size = sizeof(struct kvm_book3e_206_tlb_entry); | 1056 | int entry_size = sizeof(struct kvm_book3e_206_tlb_entry); |
1284 | int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE; | 1057 | int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE; |
1285 | 1058 | ||
@@ -1356,20 +1129,17 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) | |||
1356 | if (!vcpu_e500->gtlb_priv[1]) | 1129 | if (!vcpu_e500->gtlb_priv[1]) |
1357 | goto err; | 1130 | goto err; |
1358 | 1131 | ||
1359 | if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) | ||
1360 | goto err; | ||
1361 | |||
1362 | /* Init TLB configuration register */ | 1132 | /* Init TLB configuration register */ |
1363 | vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & | 1133 | vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) & |
1364 | ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); | 1134 | ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); |
1365 | vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[0].entries; | 1135 | vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries; |
1366 | vcpu_e500->tlb0cfg |= | 1136 | vcpu->arch.tlbcfg[0] |= |
1367 | vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT; | 1137 | vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT; |
1368 | 1138 | ||
1369 | vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & | 1139 | vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) & |
1370 | ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); | 1140 | ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); |
1371 | vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[1].entries; | 1141 | vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[1].entries; |
1372 | vcpu_e500->tlb0cfg |= | 1142 | vcpu->arch.tlbcfg[0] |= |
1373 | vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT; | 1143 | vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT; |
1374 | 1144 | ||
1375 | return 0; | 1145 | return 0; |
@@ -1384,8 +1154,6 @@ err: | |||
1384 | void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) | 1154 | void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) |
1385 | { | 1155 | { |
1386 | free_gtlb(vcpu_e500); | 1156 | free_gtlb(vcpu_e500); |
1387 | kvmppc_e500_id_table_free(vcpu_e500); | ||
1388 | |||
1389 | kfree(vcpu_e500->tlb_refs[0]); | 1157 | kfree(vcpu_e500->tlb_refs[0]); |
1390 | kfree(vcpu_e500->tlb_refs[1]); | 1158 | kfree(vcpu_e500->tlb_refs[1]); |
1391 | } | 1159 | } |
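
For reference, kvmppc_core_vcpu_translate() (moved into e500_tlb.c above) expects userspace to pack AS, PID, and the effective address into kvm_translation.linear_address: EADDR in bits 0..31, PID in bits 32..39, AS in bit 40. A small sketch of that encoding and its decode, mirroring the shifts in the moved code; the sample values are arbitrary.

#include <stdio.h>
#include <stdint.h>

static uint64_t encode_linear_address(uint32_t eaddr, uint8_t pid, int as)
{
	return (uint64_t)eaddr |
	       ((uint64_t)pid << 32) |
	       ((uint64_t)(as & 1) << 40);
}

int main(void)
{
	uint64_t la = encode_linear_address(0xc0001234, 0x2a, 1);

	printf("linear_address = 0x%011llx\n", (unsigned long long)la);
	/* decode, mirroring the kernel side */
	printf("eaddr=0x%08llx pid=0x%02llx as=%llu\n",
	       (unsigned long long)(la & 0xffffffff),
	       (unsigned long long)((la >> 32) & 0xff),
	       (unsigned long long)((la >> 40) & 1));
	return 0;
}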