author     Alexander Graf <agraf@suse.de>    2014-05-15 08:36:05 -0400
committer  Alexander Graf <agraf@suse.de>    2014-05-30 08:26:30 -0400
commit     207438d4e21e05728a8a58b5e25b0f6553260068 (patch)
tree       e26c500f44108f4ea3528988459b76ae3bf5d2a3
parent     000a25ddb7106cdcb34e7f6c7547e5b2354b6645 (diff)
KVM: PPC: Book3S PR: Use SLB entry 0
So far we have not made use of SLB entry 0, for no good reason: SLB entry 0
is always taken by the Linux linear-mapping SLB entry, so the fact that slbia
does not invalidate entry 0 does not matter; we overwrite SLB entry 0 on exit
anyway.

Just enable use of SLB entry 0 for our shadow SLB code.
Signed-off-by: Alexander Graf <agraf@suse.de>
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c | 11 ++++-------
-rw-r--r--  arch/powerpc/kvm/book3s_64_slb.S      |  3 ++-
2 files changed, 6 insertions(+), 8 deletions(-)
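The commit message rests on one invariant: slbia leaves SLB entry 0 alone, but
the exit path unconditionally rewrites entry 0 with the host's linear-mapping
segment, so no guest mapping can survive there. Below is a minimal C sketch of
that invariant; shadow_slbe, slbmte_stub and restore_host_entry0 are
illustrative names, not the kernel's (only SLB_ESID_V matches the real define):

#include <stdint.h>
#include <stdio.h>

#define SLB_ESID_V	(1ULL << 27)	/* valid bit, as in the kernel's SLB_ESID_V */

struct shadow_slbe {
	uint64_t esid;
	uint64_t vsid;
};

/* Stand-in for the slbmte instruction (RS = VSID data, RB = ESID data). */
static void slbmte_stub(uint64_t vsid_data, uint64_t esid_data)
{
	printf("slbmte vsid=%#llx esid=%#llx\n",
	       (unsigned long long)vsid_data, (unsigned long long)esid_data);
}

/*
 * Hypothetical sketch: on every guest exit, entry 0 is overwritten with
 * the host's linear-mapping segment.  slbia skipping entry 0 is therefore
 * harmless -- whatever the shadow SLB code put there is gone before
 * Linux runs again.
 */
static void restore_host_entry0(const struct shadow_slbe *host_linear)
{
	/* ESID word carries the valid bit; low index bits are 0 for slot 0. */
	slbmte_stub(host_linear->vsid, host_linear->esid | SLB_ESID_V);
}

int main(void)
{
	/* Illustrative values only: kernel linear map starts at 0xc000... */
	struct shadow_slbe linear = {
		.esid = 0xc000000000000000ULL,
		.vsid = 0x400ULL,
	};
	restore_host_entry0(&linear);
	return 0;
}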
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index e2efb85c65a3..0ac98392f363 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -271,11 +271,8 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
 	int found_inval = -1;
 	int r;
 
-	if (!svcpu->slb_max)
-		svcpu->slb_max = 1;
-
 	/* Are we overwriting? */
-	for (i = 1; i < svcpu->slb_max; i++) {
+	for (i = 0; i < svcpu->slb_max; i++) {
 		if (!(svcpu->slb[i].esid & SLB_ESID_V))
 			found_inval = i;
 		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
@@ -285,7 +282,7 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
 	}
 
 	/* Found a spare entry that was invalidated before */
-	if (found_inval > 0) {
+	if (found_inval >= 0) {
 		r = found_inval;
 		goto out;
 	}
@@ -359,7 +356,7 @@ void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
 	ulong seg_mask = -seg_size;
 	int i;
 
-	for (i = 1; i < svcpu->slb_max; i++) {
+	for (i = 0; i < svcpu->slb_max; i++) {
 		if ((svcpu->slb[i].esid & SLB_ESID_V) &&
 		    (svcpu->slb[i].esid & seg_mask) == ea) {
 			/* Invalidate this entry */
@@ -373,7 +370,7 @@ void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
 void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->slb_max = 1;
+	svcpu->slb_max = 0;
 	svcpu->slb[0].esid = 0;
 	svcpu_put(svcpu);
 }
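With entry 0 back in play, the allocation scan in kvmppc_mmu_next_segment()
becomes a plain first-fit search from index 0, and the found_inval >= 0 change
is the matching fix: index 0 is now a legal "spare invalidated slot". A
standalone C sketch of that search follows; SLB_ESID_V and ESID_MASK match the
kernel defines, but the function itself is a simplified illustration, not the
kernel code:

#include <stdint.h>

#define SLB_ESID_V	(1ULL << 27)
#define ESID_MASK	0xfffffffff0000000ULL

struct shadow_slbe {
	uint64_t esid;
	uint64_t vsid;
};

/*
 * First-fit search over the shadow SLB, starting at entry 0 now that no
 * slot is reserved: reuse a matching ESID, else a previously invalidated
 * slot, else report that no slot was found.
 */
static int next_segment(const struct shadow_slbe *slb, int slb_max,
			uint64_t esid)
{
	int found_inval = -1;
	int i;

	for (i = 0; i < slb_max; i++) {
		if (!(slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((slb[i].esid & ESID_MASK) == esid)
			return i;	/* already mapped: overwrite in place */
	}

	/* Was "> 0" while entry 0 was reserved for the host. */
	if (found_inval >= 0)
		return found_inval;

	return -1;	/* the real code grows slb_max or evicts an entry */
}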
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 596140e5c889..84c52c6b5837 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -138,7 +138,8 @@ slb_do_enter:
 
 	/* Restore bolted entries from the shadow and fix it along the way */
 
-	/* We don't store anything in entry 0, so we don't need to take care of it */
+	li	r0, r0
+	slbmte	r0, r0
 	slbia
 	isync
 
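Two details in the assembly are easy to miss. slbia invalidates every SLB
entry except entry 0, so the new code first writes an all-zero (valid bit
clear) entry into slot 0 via slbmte; and "li r0, r0" only assembles because
the kernel's ppc_asm.h register macros expand r0 to the literal 0, so it is
equivalent to "li r0, 0". The same clear-everything sequence can be sketched
as GCC inline assembly in the style of arch/powerpc/mm/slb.c; clear_full_slb
is a hypothetical helper, not the KVM exit path, and in real code the bolted
host entries must be reinstalled immediately afterwards, as slb_do_enter goes
on to do:

/*
 * Sketch: wipe the whole SLB from C, including entry 0.
 * slbmte with both operands zero writes an invalid entry into slot 0
 * (index 0, valid bit clear); slbia then drops the remaining entries.
 */
static inline void clear_full_slb(void)
{
	unsigned long zero = 0;

	asm volatile("slbmte %0,%1" : : "r" (zero), "r" (zero) : "memory");
	asm volatile("slbia" : : : "memory");
	asm volatile("isync" : : : "memory");
}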