author     Liu Yu <yu.liu@freescale.com>   2011-06-14 19:35:14 -0400
committer  Avi Kivity <avi@redhat.com>     2011-07-12 06:16:39 -0400
commit     dd9ebf1f94354b010f2ac7a98bf69168636cb08e
tree       d7a6cbe2b31cd7349913d289b715fa1086ba0a1a
parent     08b7fa92b9250eab0f493f7721977e781a887b3d
KVM: PPC: e500: Add shadow PID support
Dynamically assign host PIDs to guest PIDs, splitting each guest PID into multiple host (shadow) PIDs based on kernel/user and MSR[IS/DS]. Use both PID0 and PID1 so that the shadow PIDs for the right mode can be selected, corresponding both to guest TID = zero and guest TID = guest PID.

This allows us to significantly reduce the frequency with which the entire TLB must be invalidated. When the guest mode or PID changes, we just update the host PID0/PID1. And since the allocation of shadow PIDs is global, multiple guests can share the TLB without conflict.

Note that KVM does not yet support the guest setting PID1 or PID2 to a value other than zero. This will need to be fixed for nested KVM to work. Until then, we enforce the requirement for guest PID1/PID2 to stay zero by failing the emulation if the guest tries to set them to something else.

Signed-off-by: Liu Yu <yu.liu@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
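The mechanism at the heart of the patch is easier to see outside the diff: each vcpu keeps a table indexed by (AS, guest PID, PR) whose entries are lazily bound to shadow IDs allocated per physical core, and the whole TLB only has to be flushed when that ID space runs out. The following minimal single-core sketch shows the lookup-or-allocate flow; the flat arrays and the names get_shadow_id, vcpu_ids, and core_ids are illustrative stand-ins for the patch's per-CPU structures and kvmppc_e500_get_sid, not kernel code.

#include <string.h>

#define NUM_TIDS 256

/* One mapping: guest (AS, PID, PR) -> shadow ID on the local core. */
struct id {
	unsigned long val;	/* shadow ID; 0 means "no mapping" */
	struct id **pentry;	/* back-pointer into the per-core table */
};

/* Per-vcpu table, indexed [AS][guest PID][PR] as in the patch. */
static struct id vcpu_ids[2][NUM_TIDS][2];

/* Per-core reverse map and allocation cursor (per-CPU in the kernel). */
static struct id *core_ids[NUM_TIDS];
static unsigned long last_used_sid;

/* Look up the shadow ID for (as, pid, pr), allocating one lazily. */
static int get_shadow_id(unsigned int as, unsigned int pid, unsigned int pr)
{
	struct id *e = &vcpu_ids[as][pid][pr];

	/* A mapping is valid only while the vcpu and core tables agree. */
	if (e->val != 0 && core_ids[e->val] == e)
		return (int)e->val;

	if (++last_used_sid >= NUM_TIDS) {
		/*
		 * Shadow ID space exhausted: this is the only point where
		 * the whole TLB must be flushed; then recycle IDs from 1.
		 */
		memset(core_ids, 0, sizeof(core_ids));
		last_used_sid = 1;
	}

	e->val = last_used_sid;
	e->pentry = &core_ids[last_used_sid];
	core_ids[last_used_sid] = e;
	return (int)e->val;
}

After a flush, stale vcpu-side entries fail the agreement check and are simply re-bound on next use, which is why a guest PID or mode switch costs no more than writing the returned IDs into PID0/PID1.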
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/kvm_e500.h      8
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h      1
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c        1
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c               4
-rw-r--r--  arch/powerpc/kvm/booke.c                11
-rw-r--r--  arch/powerpc/kvm/booke.h                 1
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S     11
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c          4
-rw-r--r--  arch/powerpc/kvm/e500_tlb.c            312
-rw-r--r--  arch/powerpc/kvm/e500_tlb.h             13
10 files changed, 334 insertions(+), 32 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
index 4a6d77a8b8a0..adbfca9dd100 100644
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ b/arch/powerpc/include/asm/kvm_e500.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Yu Liu, <yu.liu@freescale.com>
  *
@@ -37,6 +37,8 @@ struct tlbe_priv {
 	unsigned int flags; /* E500_TLB_* */
 };
 
+struct vcpu_id_table;
+
 struct kvmppc_vcpu_e500 {
 	/* Unmodified copy of the guest's TLB. */
 	struct tlbe *gtlb_arch[E500_TLB_NUM];
@@ -59,6 +61,10 @@ struct kvmppc_vcpu_e500 {
 	u32 mas5;
 	u32 mas6;
 	u32 mas7;
+
+	/* vcpu id table */
+	struct vcpu_id_table *idt;
+
 	u32 l1csr0;
 	u32 l1csr1;
 	u32 hid0;
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index c4ce1054b866..6e05b2d13683 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -255,6 +255,7 @@ struct kvm_vcpu_arch {
 	u32 pvr;
 
 	u32 shadow_pid;
+	u32 shadow_pid1;
 	u32 pid;
 	u32 swap_pid;
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index ecd2b3ad7ff6..faf846131f45 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -402,6 +402,7 @@ int main(void)
 	DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
 	DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
 	DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
+	DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
 	DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
 	DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
 	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 5f3cff83e089..33aa715dab28 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -387,8 +387,10 @@ static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
 	}
 }
 
-void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
 {
+	int usermode = vcpu->arch.shared->msr & MSR_PR;
+
 	vcpu->arch.shadow_pid = !usermode;
 }
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 4538956daecf..9f2e4a5e1c4d 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -113,15 +113,18 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
 }
 #endif
 
-/* Helper function for "full" MSR writes. No need to call this if only EE is
- * changing. */
+/*
+ * Helper function for "full" MSR writes. No need to call this if only
+ * EE/CE/ME/DE/RI are changing.
+ */
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
 {
-	if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
-		kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
+	u32 old_msr = vcpu->arch.shared->msr;
 
 	vcpu->arch.shared->msr = new_msr;
 
+	kvmppc_mmu_msr_notify(vcpu, old_msr);
+
 	if (vcpu->arch.shared->msr & MSR_WE) {
 		kvm_vcpu_block(vcpu);
 		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 0fa1732ddcb7..8e1fe33d64e5 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -53,6 +53,7 @@
 extern unsigned long kvmppc_booke_handlers;
 
 void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr);
 
 int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                             unsigned int inst, int *advance);
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 8cb3dfe29f75..42f2fb1f66e9 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -191,6 +191,12 @@ _GLOBAL(kvmppc_resume_host)
 	lwz	r3, VCPU_HOST_PID(r4)
 	mtspr	SPRN_PID, r3
 
+#ifdef CONFIG_FSL_BOOKE
+	/* we cheat and know that Linux doesn't use PID1 which is always 0 */
+	lis	r3, 0
+	mtspr	SPRN_PID1, r3
+#endif
+
 	/* Restore host IVPR before re-enabling interrupts. We cheat and know
 	 * that Linux IVPR is always 0xc0000000. */
 	lis	r3, 0xc000
@@ -365,6 +371,11 @@ lightweight_exit:
 	lwz	r3, VCPU_SHADOW_PID(r4)
 	mtspr	SPRN_PID, r3
 
+#ifdef CONFIG_FSL_BOOKE
+	lwz	r3, VCPU_SHADOW_PID1(r4)
+	mtspr	SPRN_PID1, r3
+#endif
+
 #ifdef CONFIG_44x
 	iccci	0, 0 /* XXX hack */
 #endif
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 69cd665a0caf..d48ae396f41e 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -81,8 +81,12 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
 		kvmppc_set_pid(vcpu, spr_val);
 		break;
 	case SPRN_PID1:
+		if (spr_val != 0)
+			return EMULATE_FAIL;
 		vcpu_e500->pid[1] = spr_val; break;
 	case SPRN_PID2:
+		if (spr_val != 0)
+			return EMULATE_FAIL;
 		vcpu_e500->pid[2] = spr_val; break;
 	case SPRN_MAS0:
 		vcpu_e500->mas0 = spr_val; break;
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 9d1e28d443c4..ea394571bbb6 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -28,8 +28,196 @@
 
 #define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)
 
+struct id {
+	unsigned long val;
+	struct id **pentry;
+};
+
+#define NUM_TIDS 256
+
+/*
+ * This table provides mappings from:
+ * (guestAS,guestTID,guestPR) --> ID of physical cpu
+ * guestAS	[0..1]
+ * guestTID	[0..255]
+ * guestPR	[0..1]
+ * ID		[1..255]
+ * Each vcpu keeps one vcpu_id_table.
+ */
+struct vcpu_id_table {
+	struct id id[2][NUM_TIDS][2];
+};
+
+/*
+ * This table provides reversed mappings of vcpu_id_table:
+ * ID --> address of vcpu_id_table item.
+ * Each physical core has one pcpu_id_table.
+ */
+struct pcpu_id_table {
+	struct id *entry[NUM_TIDS];
+};
+
+static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
+
+/* This variable keeps the last used shadow ID on the local core.
+ * The valid range of a shadow ID is [1..255]. */
+static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
+
 static unsigned int tlb1_entry_num;
 
+/*
+ * Allocate a free shadow id and set up a valid sid mapping in the given
+ * entry. A mapping is only valid when vcpu_id_table and pcpu_id_table match.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static inline int local_sid_setup_one(struct id *entry)
+{
+	unsigned long sid;
+	int ret = -1;
+
+	sid = ++(__get_cpu_var(pcpu_last_used_sid));
+	if (sid < NUM_TIDS) {
+		__get_cpu_var(pcpu_sids).entry[sid] = entry;
+		entry->val = sid;
+		entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
+		ret = sid;
+	}
+
+	/*
+	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
+	 * the caller will invalidate everything and start over.
+	 *
+	 * sid > NUM_TIDS indicates a race, which we disable preemption to
+	 * avoid.
+	 */
+	WARN_ON(sid > NUM_TIDS);
+
+	return ret;
+}
+
+/*
+ * Check if the given entry contains a valid shadow id mapping.
+ * An ID mapping is considered valid only if
+ * both vcpu and pcpu know this mapping.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static inline int local_sid_lookup(struct id *entry)
+{
+	if (entry && entry->val != 0 &&
+	    __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
+	    entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
+		return entry->val;
+	return -1;
+}
+
+/* Invalidate all id mappings on the local core */
+static inline void local_sid_destroy_all(void)
+{
+	preempt_disable();
+	__get_cpu_var(pcpu_last_used_sid) = 0;
+	memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
+	preempt_enable();
+}
+
+static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
+	return vcpu_e500->idt;
+}
+
+static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+	kfree(vcpu_e500->idt);
+}
+
+/* Invalidate all mappings on the vcpu */
+static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
+
+	/* Update shadow pid when mappings are changed */
+	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+}
+
+/* Invalidate one ID mapping on the vcpu */
+static inline void kvmppc_e500_id_table_reset_one(
+	struct kvmppc_vcpu_e500 *vcpu_e500,
+	int as, int pid, int pr)
+{
+	struct vcpu_id_table *idt = vcpu_e500->idt;
+
+	BUG_ON(as >= 2);
+	BUG_ON(pid >= NUM_TIDS);
+	BUG_ON(pr >= 2);
+
+	idt->id[as][pid][pr].val = 0;
+	idt->id[as][pid][pr].pentry = NULL;
+
+	/* Update shadow pid when mappings are changed */
+	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+}
+
+/*
+ * Map guest (vcpu,AS,ID,PR) to a physical core shadow id.
+ * This function first looks up whether a valid mapping exists
+ * and, if not, creates a new one.
+ *
+ * The caller must have preemption disabled, and keep it that way until
+ * it has finished with the returned shadow id (either written into the
+ * TLB or arch.shadow_pid, or discarded).
+ */
+static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
+					unsigned int as, unsigned int gid,
+					unsigned int pr, int avoid_recursion)
+{
+	struct vcpu_id_table *idt = vcpu_e500->idt;
+	int sid;
+
+	BUG_ON(as >= 2);
+	BUG_ON(gid >= NUM_TIDS);
+	BUG_ON(pr >= 2);
+
+	sid = local_sid_lookup(&idt->id[as][gid][pr]);
+
+	while (sid <= 0) {
+		/* No mapping yet */
+		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
+		if (sid <= 0) {
+			_tlbil_all();
+			local_sid_destroy_all();
+		}
+
+		/* Update shadow pid when mappings are changed */
+		if (!avoid_recursion)
+			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+	}
+
+	return sid;
+}
+
+/* Map guest pid to shadow.
+ * We use PID to hold the shadow of the current guest non-zero PID,
+ * and PID1 to hold the shadow of the guest zero PID,
+ * so that a guest tlbe with TID=0 can be accessed at any time. */
+void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
+{
+	preempt_disable();
+	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
+			get_cur_as(&vcpu_e500->vcpu),
+			get_cur_pid(&vcpu_e500->vcpu),
+			get_cur_pr(&vcpu_e500->vcpu), 1);
+	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
+			get_cur_as(&vcpu_e500->vcpu), 0,
+			get_cur_pr(&vcpu_e500->vcpu), 1);
+	preempt_enable();
+}
+
 void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -134,14 +322,19 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 
 void kvmppc_map_magic(struct kvm_vcpu *vcpu)
 {
+	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 	struct tlbe magic;
 	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
+	unsigned int stid;
 	pfn_t pfn;
 
 	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
 	get_page(pfn_to_page(pfn));
 
-	magic.mas1 = MAS1_VALID | MAS1_TS |
+	preempt_disable();
+	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);
+
+	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
 		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
 	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
 	magic.mas3 = (pfn << PAGE_SHIFT) |
@@ -149,15 +342,76 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
 	magic.mas7 = pfn >> (32 - PAGE_SHIFT);
 
 	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
+	preempt_enable();
 }
 
 void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+	/* Shadow PID may have expired on the local core */
+	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
 }
 
 void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
 {
-	_tlbil_all();
+}
+
+static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
+					 int tlbsel, int esel)
+{
+	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
+	struct vcpu_id_table *idt = vcpu_e500->idt;
+	unsigned int pr, tid, ts, pid;
+	u32 val, eaddr;
+	unsigned long flags;
+
+	ts = get_tlb_ts(gtlbe);
+	tid = get_tlb_tid(gtlbe);
+
+	preempt_disable();
+
+	/* One guest ID may be mapped to two shadow IDs */
+	for (pr = 0; pr < 2; pr++) {
+		/*
+		 * The shadow PID can have a valid mapping on at most one
+		 * host CPU.  In the common case, it will be valid on this
+		 * CPU, in which case (for TLB0) we do a local invalidation
+		 * of the specific address.
+		 *
+		 * If the shadow PID is not valid on the current host CPU, or
+		 * if we're invalidating a TLB1 entry, we invalidate the
+		 * entire shadow PID.
+		 */
+		if (tlbsel == 1 ||
+		    (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
+			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
+			continue;
+		}
+
+		/*
+		 * The guest is invalidating a TLB0 entry which is in a PID
+		 * that has a valid shadow mapping on this host CPU.  We
+		 * search host TLB0 to invalidate its shadow TLB entry,
+		 * similar to __tlbil_va except that we need to look in AS1.
+		 */
+		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
+		eaddr = get_tlb_eaddr(gtlbe);
+
+		local_irq_save(flags);
+
+		mtspr(SPRN_MAS6, val);
+		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
+		val = mfspr(SPRN_MAS1);
+		if (val & MAS1_VALID) {
+			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
+			asm volatile("tlbwe");
+		}
+
+		local_irq_restore(flags);
+	}
+
+	preempt_enable();
 }
 
 /* Search the guest TLB for a matching entry. */
@@ -216,12 +470,6 @@ static inline void kvmppc_e500_priv_release(struct tlbe_priv *priv)
 	}
 }
 
-static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
-					int esel)
-{
-	mtspr(SPRN_MMUCSR0, MMUCSR0_TLB1FI);
-}
-
 static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
 						unsigned int eaddr, int as)
 {
@@ -255,10 +503,15 @@ static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 					  u64 gvaddr, struct tlbe *stlbe)
 {
 	pfn_t pfn = priv->pfn;
+	unsigned int stid;
+
+	stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
+				   get_tlb_tid(gtlbe),
+				   get_cur_pr(&vcpu_e500->vcpu), 0);
 
 	/* Force TS=1 IPROT=0 for all guest mappings. */
 	stlbe->mas1 = MAS1_TSIZE(tsize)
-		| MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
+		| MAS1_TID(stid) | MAS1_TS | MAS1_VALID;
 	stlbe->mas2 = (gvaddr & MAS2_EPN)
 		| e500_shadow_mas2_attrib(gtlbe->mas2,
 				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
@@ -414,14 +667,12 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	return victim;
 }
 
-/* Invalidate all guest kernel mappings when enter usermode,
- * so that when they fault back in they will get the
- * proper permission bits. */
-void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
 {
-	if (usermode) {
-		_tlbil_all();
-	}
+	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+
+	/* Recalc shadow pid since MSR changes */
+	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
 }
 
 static inline int kvmppc_e500_gtlbe_invalidate(
@@ -449,7 +700,8 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
 	for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++)
 		kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
 
-	_tlbil_all();
+	/* Invalidate all vcpu id mappings */
+	kvmppc_e500_id_table_reset_all(vcpu_e500);
 
 	return EMULATE_DONE;
 }
@@ -480,7 +732,8 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
 		kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
 	}
 
-	_tlbil_all();
+	/* Invalidate all vcpu id mappings */
+	kvmppc_e500_id_table_reset_all(vcpu_e500);
 
 	return EMULATE_DONE;
 }
@@ -564,8 +817,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 
 	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
 
-	if (get_tlb_v(gtlbe) && tlbsel == 1)
-		kvmppc_e500_tlb1_invalidate(vcpu_e500, esel);
+	if (get_tlb_v(gtlbe))
+		kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
 
 	gtlbe->mas1 = vcpu_e500->mas1;
 	gtlbe->mas2 = vcpu_e500->mas2;
@@ -582,6 +835,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 		u64 eaddr;
 		u64 raddr;
 
+		preempt_disable();
 		switch (tlbsel) {
 		case 0:
 			/* TLB0 */
@@ -611,6 +865,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 			BUG();
 		}
 		write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
+		preempt_enable();
 	}
 
 	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
@@ -672,6 +927,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 
 	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
 
+	preempt_disable();
 	switch (tlbsel) {
 	case 0:
 		stlbsel = 0;
@@ -697,6 +953,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 	}
 
 	write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
+	preempt_enable();
 }
 
 int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
@@ -718,8 +975,10 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
 {
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
-	vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
-		vcpu->arch.pid = pid;
+	if (vcpu->arch.pid != pid) {
+		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
+		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
+	}
 }
 
 void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -767,6 +1026,9 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 	if (vcpu_e500->gtlb_priv[1] == NULL)
 		goto err_out_priv0;
 
+	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
+		goto err_out_priv1;
+
 	/* Init TLB configuration register */
 	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
 	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0];
@@ -775,6 +1037,8 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
 
 	return 0;
 
+err_out_priv1:
+	kfree(vcpu_e500->gtlb_priv[1]);
 err_out_priv0:
 	kfree(vcpu_e500->gtlb_priv[0]);
 err_out_guest1:
@@ -797,9 +1061,7 @@ void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
 		kvmppc_e500_priv_release(priv);
 	}
 
-	/* discard all guest mapping */
-	_tlbil_all();
-
+	kvmppc_e500_id_table_free(vcpu_e500);
 	kfree(vcpu_e500->gtlb_arch[1]);
 	kfree(vcpu_e500->gtlb_arch[0]);
 }
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
index 458946b4775d..59b88e99a235 100644
--- a/arch/powerpc/kvm/e500_tlb.h
+++ b/arch/powerpc/kvm/e500_tlb.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
  *
  * Author: Yu Liu, yu.liu@freescale.com
  *
@@ -55,6 +55,7 @@ extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int);
 extern int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *);
 extern void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *);
 extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
+extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *);
 
 /* TLB helper functions */
 static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
@@ -110,6 +111,16 @@ static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
 	return vcpu->arch.pid & 0xff;
 }
 
+static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
+{
+	return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
+}
+
+static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
+{
+	return !!(vcpu->arch.shared->msr & MSR_PR);
+}
+
 static inline unsigned int get_cur_spid(
 	const struct kvmppc_vcpu_e500 *vcpu_e500)
 {