 arch/powerpc/include/asm/kvm_book3s.h |  6
 arch/powerpc/kvm/book3s_64_mmu.c      | 60
 arch/powerpc/kvm/book3s_64_mmu_host.c | 17
 arch/powerpc/kvm/book3s_pr.c          |  3
 4 files changed, 66 insertions(+), 20 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 349ed85c7d61..08891d07aeb6 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -107,8 +107,9 @@ struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST		1
 #define CONTEXT_GUEST_END	2
 
-#define VSID_REAL	0x1fffffffffc00000ULL
-#define VSID_BAT	0x1fffffffffb00000ULL
+#define VSID_REAL	0x0fffffffffc00000ULL
+#define VSID_BAT	0x0fffffffffb00000ULL
+#define VSID_1T		0x1000000000000000ULL
 #define VSID_REAL_DR	0x2000000000000000ULL
 #define VSID_REAL_IR	0x4000000000000000ULL
 #define VSID_PR		0x8000000000000000ULL
@@ -123,6 +124,7 @@ extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
+extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
 extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
 			struct kvm_vcpu *vcpu, unsigned long addr,
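
The shadow-VSID flag bits above all live in the top nibble of the 64-bit software VSID: VSID_PR, VSID_REAL_IR and VSID_REAL_DR occupy bits 63..61, and the new VSID_1T marker takes bit 60. That is why VSID_REAL and VSID_BAT shrink from 0x1fff... to 0x0fff...: with the old values they would have had the VSID_1T bit set. A minimal compile-time sketch of that invariant, using the constants from the header above (the static_assert checks themselves are illustrative, not part of the patch):

#include <assert.h>	/* static_assert (C11) */

#define VSID_REAL	0x0fffffffffc00000ULL
#define VSID_BAT	0x0fffffffffb00000ULL
#define VSID_1T		0x1000000000000000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL

/* The magic "real mode" / BAT VSIDs must not overlap any flag bit. */
static_assert((VSID_REAL & (VSID_1T | VSID_REAL_DR | VSID_REAL_IR | VSID_PR)) == 0,
	      "VSID_REAL overlaps a flag bit");
static_assert((VSID_BAT & (VSID_1T | VSID_REAL_DR | VSID_REAL_IR | VSID_PR)) == 0,
	      "VSID_BAT overlaps a flag bit");
/* With the old value, VSID_REAL would have carried the new VSID_1T bit. */
static_assert((0x1fffffffffc00000ULL & VSID_1T) != 0,
	      "old VSID_REAL value would have collided with VSID_1T");
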
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 2e93bb50a71c..ee435ba6b92a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -26,6 +26,7 @@
 #include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
 
 /* #define DEBUG_MMU */
 
@@ -76,6 +77,24 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
 	return NULL;
 }
 
+static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
+{
+	return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
+}
+
+static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
+{
+	return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
+}
+
+static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
+{
+	eaddr &= kvmppc_slb_offset_mask(slb);
+
+	return (eaddr >> VPN_SHIFT) |
+		((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
+}
+
 static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
 					 bool data)
 {
@@ -85,11 +104,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
 	if (!slb)
 		return 0;
 
-	if (slb->tb)
-		return (((u64)eaddr >> 12) & 0xfffffff) |
-			(((u64)slb->vsid) << 28);
-
-	return (((u64)eaddr >> 12) & 0xffff) | (((u64)slb->vsid) << 16);
+	return kvmppc_slb_calc_vpn(slb, eaddr);
 }
 
 static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
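
The helpers introduced above concentrate the 256M-versus-1T arithmetic in one place: a segment's effective-address offset is 28 bits wide for a 256M segment and 40 bits for a 1T segment, and the virtual page number is that offset (above the 4K page bits) concatenated with the VSID. A standalone sketch of kvmppc_slb_calc_vpn()'s arithmetic, assuming the usual hash-MMU constants SID_SHIFT = 28, SID_SHIFT_1T = 40 and VPN_SHIFT = 12 from asm/mmu-hash64.h:

#include <stdint.h>
#include <stdio.h>

#define SID_SHIFT	28	/* 256M segment */
#define SID_SHIFT_1T	40	/* 1T segment   */
#define VPN_SHIFT	12

/* Mirrors kvmppc_slb_calc_vpn(): vpn = (vsid << (sid_shift - VPN_SHIFT)) | (offset >> VPN_SHIFT) */
uint64_t calc_vpn(uint64_t vsid, uint64_t eaddr, int one_t)
{
	int sid_shift = one_t ? SID_SHIFT_1T : SID_SHIFT;
	uint64_t offset_mask = (1ULL << sid_shift) - 1;

	return ((eaddr & offset_mask) >> VPN_SHIFT) |
	       (vsid << (sid_shift - VPN_SHIFT));
}

int main(void)
{
	/* Same effective address and VSID, different segment sizes. */
	printf("256M: %#llx\n", (unsigned long long)calc_vpn(0x1234, 0x10003000, 0));
	printf("1T:   %#llx\n", (unsigned long long)calc_vpn(0x1234, 0x10003000, 1));
	return 0;
}

For effective address 0x10003000 and VSID 0x1234 this yields 0x12340003 in a 256M segment and 0x12340010003 in a 1T segment.
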
@@ -100,7 +115,8 @@ static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
 static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
 {
 	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
-	return ((eaddr & 0xfffffff) >> p);
+
+	return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
 }
 
 static hva_t kvmppc_mmu_book3s_64_get_pteg(
@@ -109,13 +125,15 @@ static hva_t kvmppc_mmu_book3s_64_get_pteg(
 				bool second)
 {
 	u64 hash, pteg, htabsize;
-	u32 page;
+	u32 ssize;
 	hva_t r;
+	u64 vpn;
 
-	page = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
 	htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);
 
-	hash = slbe->vsid ^ page;
+	vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
+	ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
+	hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
 	if (second)
 		hash = ~hash;
 	hash &= ((1ULL << 39ULL) - 1ULL);
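
The PTEG index is no longer the 256M-only `vsid ^ page` but the architected hash for the segment size, obtained from the host's hpt_hash(). Roughly, the primary hash is vsid XOR page-offset for a 256M segment, and vsid XOR (vsid << 25) XOR page-offset for a 1T segment, truncated to 39 bits. The sketch below is an approximation of that formula under the same SID_SHIFT/SID_SHIFT_1T/VPN_SHIFT assumptions as before; the real helper in asm/mmu-hash64.h is authoritative:

#include <stdint.h>

#define SID_SHIFT	28
#define SID_SHIFT_1T	40
#define VPN_SHIFT	12

/* Approximate hashed-page-table primary hash; page_shift is 12 for 4K pages. */
uint64_t hpt_primary_hash(uint64_t vpn, int page_shift, int one_t)
{
	uint64_t hash, vsid, mask;

	if (!one_t) {
		mask = (1ULL << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
		       ((vpn & mask) >> (page_shift - VPN_SHIFT));
	} else {
		mask = (1ULL << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
		       ((vpn & mask) >> (page_shift - VPN_SHIFT));
	}
	return hash & ((1ULL << 39) - 1);	/* 39-bit hash, as masked above */
}

The secondary hash is the ones' complement of the primary hash, which is what the `if (second) hash = ~hash;` path above applies before masking to the hash-table size.
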
@@ -146,7 +164,7 @@ static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
 	u64 avpn;
 
 	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
-	avpn |= slbe->vsid << (28 - p);
+	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);
 
 	if (p < 24)
 		avpn >>= ((80 - p) - 56) - 8;
@@ -189,13 +207,15 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	if (!slbe)
 		goto no_seg_found;
 
+	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
+	if (slbe->tb)
+		avpn |= SLB_VSID_B_1T;
+
 do_second:
 	ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
 	if (kvm_is_error_hva(ptegp))
 		goto no_page_found;
 
-	avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
-
 	if(copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
 		printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp);
 		goto no_page_found;
@@ -218,7 +238,7 @@ do_second:
 			continue;
 
 		/* AVPN compare */
-		if (HPTE_V_AVPN_VAL(avpn) == HPTE_V_AVPN_VAL(v)) {
+		if (HPTE_V_COMPARE(avpn, v)) {
 			u8 pp = (r & HPTE_R_PP) | key;
 			int eaddr_mask = 0xFFF;
 
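
Two details above work together: the xlate path ORs SLB_VSID_B_1T into avpn for 1T segments, and the match switches from comparing HPTE_V_AVPN_VAL() to HPTE_V_COMPARE(). The HPTE's first doubleword carries the segment-size field in its top bits alongside the AVPN, and HPTE_V_COMPARE() masks off only the low-order flag bits, so the segment size now takes part in the comparison and a 256M entry can no longer match a 1T lookup. A small illustrative check (the mask and bit values mirror the host headers and are assumptions here):

#include <stdint.h>

/* Assumed to mirror asm/mmu-hash64.h. */
#define HPTE_V_COMPARE(x, y)	(!(((x) ^ (y)) & 0xffffffffffffff80ULL))
#define HPTE_V_VALID		0x0000000000000001ULL
#define SLB_VSID_B_1T		0x4000000000000000ULL

int main(void)
{
	uint64_t avpn = 0x00123456789ab080ULL;	/* hypothetical AVPN bits */
	uint64_t hpte_v_256m = avpn | HPTE_V_VALID;
	uint64_t hpte_v_1t   = avpn | SLB_VSID_B_1T | HPTE_V_VALID;

	/* Low flag bits are ignored, so the 256M entry matches ... */
	int m1 = HPTE_V_COMPARE(avpn, hpte_v_256m);			/* 1 */
	/* ... but the segment-size bits are compared, so 1T does not ... */
	int m2 = HPTE_V_COMPARE(avpn, hpte_v_1t);			/* 0 */
	/* ... unless the lookup key also carries the 1T encoding. */
	int m3 = HPTE_V_COMPARE(avpn | SLB_VSID_B_1T, hpte_v_1t);	/* 1 */

	return !(m1 && !m2 && m3);
}
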
@@ -324,7 +344,7 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
 	slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
 	slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0;
 	slbe->esid = slbe->tb ? esid_1t : esid;
-	slbe->vsid = rs >> 12;
+	slbe->vsid = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
 	slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
 	slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0;
 	slbe->Kp = (rs & SLB_VSID_KP) ? 1 : 0;
@@ -365,6 +385,7 @@ static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
 static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
 {
 	struct kvmppc_slb *slbe;
+	u64 seg_size;
 
 	dprintk("KVM MMU: slbie(0x%llx)\n", ea);
 
@@ -377,7 +398,8 @@ static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
 
 	slbe->valid = false;
 
-	kvmppc_mmu_map_segment(vcpu, ea);
+	seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
+	kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
 }
 
 static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
@@ -457,8 +479,14 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
 
 	if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
 		slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
-		if (slb)
+		if (slb) {
 			gvsid = slb->vsid;
+			if (slb->tb) {
+				gvsid <<= SID_SHIFT_1T - SID_SHIFT;
+				gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
+				gvsid |= VSID_1T;
+			}
+		}
 	}
 
 	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
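
For 1T segments, kvmppc_mmu_book3s_64_esid_to_vsid() above hands the rest of the 256M-based shadow MMU one synthetic guest VSID per 256M slice: the 1T VSID is shifted up by SID_SHIFT_1T - SID_SHIFT (12 bits), the low 12 ESID bits select the slice within the 1T segment, and VSID_1T tags the result so it cannot collide with a genuine 256M VSID. A standalone sketch of that composition, with the shift constants assumed as before:

#include <stdint.h>

#define SID_SHIFT	28
#define SID_SHIFT_1T	40
#define VSID_1T		0x1000000000000000ULL

/*
 * One synthetic 256M-granular guest VSID per 256M slice of a 1T segment,
 * mirroring the slb->tb branch above; esid is the effective address >> 28.
 */
uint64_t gvsid_for_1t(uint64_t vsid_1t, uint64_t esid)
{
	uint64_t gvsid = vsid_1t << (SID_SHIFT_1T - SID_SHIFT);

	gvsid |= esid & ((1ULL << (SID_SHIFT_1T - SID_SHIFT)) - 1);
	return gvsid | VSID_1T;
}

Addresses in different 256M slices of the same 1T guest segment therefore get distinct shadow VSIDs, and the existing 256M-granular shadow SLB and host hash-table code keeps working unchanged.
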
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 2c6e7ee8be34..b350d9494b26 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -301,6 +301,23 @@ out:
 	return r;
 }
 
+void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
+{
+	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	ulong seg_mask = -seg_size;
+	int i;
+
+	for (i = 1; i < svcpu->slb_max; i++) {
+		if ((svcpu->slb[i].esid & SLB_ESID_V) &&
+		    (svcpu->slb[i].esid & seg_mask) == ea) {
+			/* Invalidate this entry */
+			svcpu->slb[i].esid = 0;
+		}
+	}
+
+	svcpu_put(svcpu);
+}
+
 void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
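
kvmppc_mmu_flush_segment() is what slbie now calls instead of kvmppc_mmu_map_segment(): it walks the shadow SLB and clears every valid entry whose ESID falls inside the invalidated guest segment. The alignment mask is simply -seg_size, the two's complement of a power of two, which keeps only the segment-number bits. A small worked example of that mask arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t seg_256m = 1ULL << 28;
	uint64_t seg_1t   = 1ULL << 40;
	uint64_t ea = 0x10003000;

	/* -seg_size keeps only the segment-number bits. */
	printf("256M mask: %#llx\n", (unsigned long long)-seg_256m);	/* 0xfffffffff0000000 */
	printf("1T mask:   %#llx\n", (unsigned long long)-seg_1t);	/* 0xffffff0000000000 */

	/* An EA inside the segment, masked, equals the segment base. */
	printf("base:      %#llx\n", (unsigned long long)(ea & -seg_256m));	/* 0x10000000 */
	return 0;
}
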
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index bdc40b8e77d9..19498a567a81 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1239,8 +1239,7 @@ out:
 #ifdef CONFIG_PPC64
 int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
 {
-	/* No flags */
-	info->flags = 0;
+	info->flags = KVM_PPC_1T_SEGMENTS;
 
 	/* SLB is always 64 entries */
 	info->slb_size = 64;
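
Setting KVM_PPC_1T_SEGMENTS in the MMU info flags advertises to userspace (typically QEMU) that guests of a PR KVM VM may create 1TB segments. Userspace reads the flags with the KVM_PPC_GET_SMMU_INFO vm ioctl; a rough sketch of such a check on a powerpc host (error handling minimal, header layout taken on trust from the uapi headers):

#include <linux/kvm.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

/* vm_fd is an already-created KVM VM file descriptor. */
int vm_supports_1t_segments(int vm_fd)
{
	struct kvm_ppc_smmu_info info;

	memset(&info, 0, sizeof(info));
	if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0) {
		perror("KVM_PPC_GET_SMMU_INFO");
		return 0;
	}
	return !!(info.flags & KVM_PPC_1T_SEGMENTS);
}
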
