author     Hollis Blanchard <hollisb@us.ibm.com>      2008-12-02 16:51:56 -0500
committer  Avi Kivity <avi@redhat.com>                2008-12-31 09:55:09 -0500
commit     c5fbdffbda79254047ec83b09c1a61a3655d052a (patch)
tree       4fb1ba38fe722d514b62c284cc618768856eedf8 /arch/powerpc/kvm
parent     7924bd41097ae8991c6d38cef8b1e4058e30d198 (diff)
KVM: ppc: save and restore guest mappings on context switch
Store shadow TLB entries in memory, and use them only on host context switch
(instead of on every guest entry). This improves performance for most workloads
on 440 by reducing the guest TLB miss rate.
Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
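
Note: the helpers added below keep their state in two per-vcpu arrays, shadow_tlb[] and shadow_tlb_mod[], in struct kvmppc_vcpu_44x. That structure lives in arch/powerpc/include/asm/kvm_44x.h, which is outside the diffstat shown here, so those fields do not appear in this diff. A rough sketch of what the code assumes (field names are taken from the patch; the array bound and surrounding fields are assumptions):

/* Sketch only -- the real definition is in arch/powerpc/include/asm/kvm_44x.h,
 * outside this diffstat. PPC44x_TLB_SIZE as the bound is an assumption; the
 * loops in the patch iterate up to tlb_44x_hwater. */
struct kvmppc_vcpu_44x {
        /* ... existing fields (guest TLB copy, shadow page references, ...) ... */

        /* In-memory copy of the hardware (shadow) TLB, captured at vcpu_put time. */
        struct kvmppc_44x_tlbe shadow_tlb[PPC44x_TLB_SIZE];
        /* Nonzero if the hardware entry may differ from shadow_tlb[i] and must be
         * read back (tlbre) before the vcpu is descheduled. */
        u8 shadow_tlb_mod[PPC44x_TLB_SIZE];

        /* ... */
};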
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--   arch/powerpc/kvm/44x.c     |  7
-rw-r--r--   arch/powerpc/kvm/44x_tlb.c | 58
2 files changed, 60 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 05d72fc8b478..a66bec57265a 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -96,15 +96,12 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
 
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+	kvmppc_44x_tlb_load(vcpu);
 }
 
 void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	/* XXX Since every guest uses TS=1 TID=0/1 mappings, we can't leave any TLB
-	 * entries around when we're descheduled, so we must completely flush the
-	 * TLB of all guest mappings. On the other hand, if there is only one
-	 * guest, this flush is completely unnecessary. */
-	_tlbia();
+	kvmppc_44x_tlb_put(vcpu);
 }
 
 int kvmppc_core_check_processor_compat(void)
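
For orientation (not part of this diff): kvmppc_core_vcpu_load()/kvmppc_core_vcpu_put() are the powerpc hooks behind the generic vcpu_load()/vcpu_put() paths, i.e. they run when the vcpu task is scheduled onto or off of a host CPU. The dispatch in arch/powerpc/kvm/powerpc.c of that era's tree is, roughly:

/* Rough sketch of the unchanged dispatch in arch/powerpc/kvm/powerpc.c. */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        kvmppc_core_vcpu_load(vcpu, cpu);   /* now restores the saved shadow TLB */
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);         /* now saves and invalidates guest entries */
}

So the TLB save/restore cost is paid only when the vcpu is actually descheduled and rescheduled, not on every exit to the host.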
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 2981ebea3d1f..ff16d0e38433 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -73,6 +73,25 @@ static inline void kvmppc_44x_tlbie(unsigned int index)
 	);
 }
 
+static inline void kvmppc_44x_tlbre(unsigned int index,
+                                    struct kvmppc_44x_tlbe *tlbe)
+{
+	asm volatile(
+		"tlbre %[word0], %[index], 0\n"
+		"mfspr %[tid], %[sprn_mmucr]\n"
+		"andi. %[tid], %[tid], 0xff\n"
+		"tlbre %[word1], %[index], 1\n"
+		"tlbre %[word2], %[index], 2\n"
+		: [word0] "=r"(tlbe->word0),
+		  [word1] "=r"(tlbe->word1),
+		  [word2] "=r"(tlbe->word2),
+		  [tid]   "=r"(tlbe->tid)
+		: [index] "r"(index),
+		  [sprn_mmucr] "i"(SPRN_MMUCR)
+		: "cc"
+	);
+}
+
 static inline void kvmppc_44x_tlbwe(unsigned int index,
                                     struct kvmppc_44x_tlbe *stlbe)
 {
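
A note on the new read helper (an explanation, not patch text): on the 440 the translation ID of a TLB entry is not held in the three entry words but in MMUCR[STID], and reading word 0 with tlbre latches that TID into MMUCR. That is why the asm issues mfspr/andi. on SPRN_MMUCR immediately after the first tlbre; the andi. keeps the low 8 bits, the TID proper. An illustrative caller (hypothetical, not from the patch):

/* Hypothetical example: snapshot hardware TLB entry 'index' into memory. */
struct kvmppc_44x_tlbe entry;

kvmppc_44x_tlbre(index, &entry);
/* entry.word0..word2 now hold the three TLB words, and entry.tid holds
 * MMUCR[STID] & 0xff, captured right after word 0 was read. */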
@@ -116,6 +135,44 @@ static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
 	return attrib;
 }
 
+/* Load shadow TLB back into hardware. */
+void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
+{
+	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+	int i;
+
+	for (i = 0; i <= tlb_44x_hwater; i++) {
+		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
+
+		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
+			kvmppc_44x_tlbwe(i, stlbe);
+	}
+}
+
+static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
+                                         unsigned int i)
+{
+	vcpu_44x->shadow_tlb_mod[i] = 1;
+}
+
+/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
+void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
+{
+	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
+	int i;
+
+	for (i = 0; i <= tlb_44x_hwater; i++) {
+		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];
+
+		if (vcpu_44x->shadow_tlb_mod[i])
+			kvmppc_44x_tlbre(i, stlbe);
+
+		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
+			kvmppc_44x_tlbie(i);
+	}
+}
+
+
 /* Search the guest TLB for a matching entry. */
 int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                          unsigned int as)
@@ -283,6 +340,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, u64 asid,
 	ref->tid = stlbe.tid;
 
 	/* Insert shadow mapping into hardware TLB. */
+	kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
 	kvmppc_44x_tlbwe(victim, &stlbe);
 	KVMTRACE_5D(STLB_WRITE, vcpu, victim, stlbe.tid, stlbe.word0, stlbe.word1,
 		    stlbe.word2, handler);
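
Taken together, the patch turns context-switch TLB handling into a save/restore cycle: kvmppc_mmu_map() marks an entry modified whenever it writes it to hardware; kvmppc_44x_tlb_put() reads back only the modified entries into shadow_tlb[] and invalidates the valid TS=1 (guest) entries; kvmppc_44x_tlb_load() writes the saved entries back when the vcpu runs again, so the guest does not have to re-fault its working set. A hypothetical host-side illustration of what switching between two vcpus now involves (the helper name is made up, not from the patch):

/* Hypothetical illustration only -- switch_guest_vcpus() is not a real function. */
static void switch_guest_vcpus(struct kvm_vcpu *prev, struct kvm_vcpu *next, int cpu)
{
        /* Deschedule: read back modified entries, tlbie prev's guest mappings. */
        kvmppc_core_vcpu_put(prev);

        /* Reschedule: rewrite next's saved guest mappings into the hardware TLB,
         * instead of letting them fault back in one by one as before this patch. */
        kvmppc_core_vcpu_load(next, cpu);
}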