 arch/powerpc/include/asm/kvm_44x.h |  6 ++++++
 arch/powerpc/kvm/44x.c             |  7 ++-----
 arch/powerpc/kvm/44x_tlb.c         | 58 ++++++++++++++++++++++++++++++++++++
 3 files changed, 66 insertions(+), 5 deletions(-)
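
What the hunks below do, in brief: instead of flushing the whole TLB with _tlbia() every time a vcpu is descheduled, the patch keeps a per-vcpu copy of the shadow TLB. kvmppc_core_vcpu_put() now saves any hardware entries modified since the last save and invalidates the guest's entries, and kvmppc_core_vcpu_load() writes the saved entries back, so guest mappings survive being scheduled out.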
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h
index e770ea2bbb1..f49031b632c 100644
--- a/arch/powerpc/include/asm/kvm_44x.h
+++ b/arch/powerpc/include/asm/kvm_44x.h
@@ -42,6 +42,10 @@ struct kvmppc_vcpu_44x {
 	/* References to guest pages in the hardware TLB. */
 	struct kvmppc_44x_shadow_ref shadow_refs[PPC44x_TLB_SIZE];
 
+	/* State of the shadow TLB at guest context switch time. */
+	struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
+	u8 shadow_tlb_mod[PPC44x_TLB_SIZE];
+
 	struct kvm_vcpu vcpu;
 };
 
@@ -51,5 +55,7 @@ static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu)
 }
 
 void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid);
+void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu);
+void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu);
 
 #endif /* __ASM_44X_H__ */
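
How the two new arrays pair up (my reading of the patch, not stated in it): shadow_tlb[i] caches the last-saved contents of hardware TLB entry i, while shadow_tlb_mod[i] flags slots the host has rewritten since that save, so the next save can re-read only those. A minimal sketch with a hypothetical helper name:

/* Hypothetical helper, not in the patch: nonzero if hardware entry i may
 * differ from the cached copy in shadow_tlb[i] and needs re-reading. */
static inline int kvmppc_44x_shadow_stale(struct kvmppc_vcpu_44x *vcpu_44x,
					  unsigned int i)
{
	return vcpu_44x->shadow_tlb_mod[i];
}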
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 05d72fc8b47..a66bec57265 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -96,15 +96,12 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	kvmppc_44x_tlb_load(vcpu);
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	/* XXX Since every guest uses TS=1 TID=0/1 mappings, we can't leave any TLB
-	 * entries around when we're descheduled, so we must completely flush the
-	 * TLB of all guest mappings. On the other hand, if there is only one
-	 * guest, this flush is completely unnecessary. */
-	_tlbia();
+	kvmppc_44x_tlb_put(vcpu);
 }
 
 int kvmppc_core_check_processor_compat(void)
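
For context, these two hooks run when the scheduler moves a vcpu off and back onto a physical CPU. A minimal sketch of the round trip a guest mapping now survives; the wrapper function is illustrative, not part of this tree:

/* Illustrative only: deschedule a vcpu, then resume it on some CPU. */
static void example_vcpu_round_trip(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_core_vcpu_put(vcpu);	/* save modified entries, invalidate guest ones */

	/* ... host runs other tasks; the hardware TLB is free for host use ... */

	kvmppc_core_vcpu_load(vcpu, cpu);	/* rewrite the saved guest entries */
}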
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 2981ebea3d1..ff16d0e3843 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -73,6 +73,25 @@ static inline void kvmppc_44x_tlbie(unsigned int index)
 	);
 }
 
+static inline void kvmppc_44x_tlbre(unsigned int index,
+                                    struct kvmppc_44x_tlbe *tlbe)
+{
+	asm volatile(
+		"tlbre %[word0], %[index], 0\n"
+		"mfspr %[tid], %[sprn_mmucr]\n"
+		"andi. %[tid], %[tid], 0xff\n"
+		"tlbre %[word1], %[index], 1\n"
+		"tlbre %[word2], %[index], 2\n"
+		: [word0] "=r"(tlbe->word0),
+		  [word1] "=r"(tlbe->word1),
+		  [word2] "=r"(tlbe->word2),
+		  [tid] "=r"(tlbe->tid)
+		: [index] "r"(index),
+		  [sprn_mmucr] "i"(SPRN_MMUCR)
+		: "cc"
+	);
+}
+
 static inline void kvmppc_44x_tlbwe(unsigned int index,
                                     struct kvmppc_44x_tlbe *stlbe)
 {
@@ -116,6 +135,44 @@ static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
 	return attrib;
 }
 
+/* Load shadow TLB back into hardware. */
+void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
+{
+	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+	int i;
+
+	for (i = 0; i <= tlb_44x_hwater; i++) {
+		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
+
+		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
+			kvmppc_44x_tlbwe(i, stlbe);
+	}
+}
+
+static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
+                                         unsigned int i)
+{
+	vcpu_44x->shadow_tlb_mod[i] = 1;
+}
+
+/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
+void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
+{
+	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+	int i;
+
+	for (i = 0; i <= tlb_44x_hwater; i++) {
+		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
+
+		if (vcpu_44x->shadow_tlb_mod[i])
+			kvmppc_44x_tlbre(i, stlbe);
+
+		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
+			kvmppc_44x_tlbie(i);
+	}
+}
+
+
 /* Search the guest TLB for a matching entry. */
 int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                          unsigned int as)
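
The asymmetry between the two loops is the point of the patch: kvmppc_44x_tlb_put() re-reads only the slots flagged in shadow_tlb_mod[] (the only ones that can differ from the cached copy), then invalidates every valid TS=1 entry so no guest translation stays live while the vcpu is descheduled; kvmppc_44x_tlb_load() just rewrites the valid TS=1 entries from the saved copy. Treating TS=1 as "guest mapping" relies on the convention noted in the comment this patch removes from 44x.c. A hypothetical debug check, not in the patch, states the postcondition:

/* After kvmppc_44x_tlb_put(), no valid guest (TS=1) translation should
 * remain in the hardware TLB. Sketch only; not part of this patch. */
static void assert_no_guest_entries(void)
{
	struct kvmppc_44x_tlbe tlbe;
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		kvmppc_44x_tlbre(i, &tlbe);
		WARN_ON(get_tlb_v(&tlbe) && get_tlb_ts(&tlbe));
	}
}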
@@ -283,6 +340,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
 	ref->tid = stlbe.tid;
 
 	/* Insert shadow mapping into hardware TLB. */
+	kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
 	kvmppc_44x_tlbwe(victim, &stlbe);
 	KVMTRACE_5D(STLB_WRITE, vcpu, victim, stlbe.tid, stlbe.word0, stlbe.word1,
 	            stlbe.word2, handler);
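
With this last hunk, the one place that writes guest-visible hardware TLB entries flags the slot before the tlbwe, so the next kvmppc_44x_tlb_put() knows it must re-read that slot instead of trusting the cached copy. Any future caller would presumably need the same pairing; a sketch of the convention, with an illustrative wrapper name:

/* Illustrative wrapper, not in the patch: keep the flag-then-write
 * pairing in one place. */
static void kvmppc_44x_tlbwe_tracked(struct kvmppc_vcpu_44x *vcpu_44x,
				     unsigned int index,
				     struct kvmppc_44x_tlbe *stlbe)
{
	kvmppc_44x_tlbe_set_modified(vcpu_44x, index);
	kvmppc_44x_tlbwe(index, stlbe);
}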