Diffstat (limited to 'arch/powerpc/kvm/book3s_pr_papr.c')
-rw-r--r--	arch/powerpc/kvm/book3s_pr_papr.c	70
1 file changed, 53 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index bcbeeb62dd13..8a4205fa774f 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -50,7 +50,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
 	pteg_addr = get_pteg_addr(vcpu, pte_index);
 
 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
-	copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
+	ret = H_FUNCTION;
+	if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
+		goto done;
 	hpte = pteg;
 
 	ret = H_PTEG_FULL;
@@ -71,7 +73,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
 	hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
 	hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
 	pteg_addr += i * HPTE_SIZE;
-	copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
+	ret = H_FUNCTION;
+	if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
+		goto done;
 	kvmppc_set_gpr(vcpu, 4, pte_index | i);
 	ret = H_SUCCESS;
 
@@ -93,7 +97,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
 
 	pteg = get_pteg_addr(vcpu, pte_index);
 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
-	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+	ret = H_FUNCTION;
+	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+		goto done;
 	pte[0] = be64_to_cpu((__force __be64)pte[0]);
 	pte[1] = be64_to_cpu((__force __be64)pte[1]);
 
@@ -103,7 +109,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
 	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
 		goto done;
 
-	copy_to_user((void __user *)pteg, &v, sizeof(v));
+	ret = H_FUNCTION;
+	if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
+		goto done;
 
 	rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
@@ -171,7 +179,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
 		}
 
 		pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
-		copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+		if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
+			ret = H_FUNCTION;
+			break;
+		}
 		pte[0] = be64_to_cpu((__force __be64)pte[0]);
 		pte[1] = be64_to_cpu((__force __be64)pte[1]);
 
@@ -184,7 +195,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
 			tsh |= H_BULK_REMOVE_NOT_FOUND;
 		} else {
 			/* Splat the pteg in (userland) hpt */
-			copy_to_user((void __user *)pteg, &v, sizeof(v));
+			if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
+				ret = H_FUNCTION;
+				break;
+			}
 
 			rb = compute_tlbie_rb(pte[0], pte[1],
 					tsh & H_BULK_REMOVE_PTEX);
@@ -211,7 +225,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
 
 	pteg = get_pteg_addr(vcpu, pte_index);
 	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
-	copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+	ret = H_FUNCTION;
+	if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+		goto done;
 	pte[0] = be64_to_cpu((__force __be64)pte[0]);
 	pte[1] = be64_to_cpu((__force __be64)pte[1]);
 
@@ -234,7 +250,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
 	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
 	pte[0] = (__force u64)cpu_to_be64(pte[0]);
 	pte[1] = (__force u64)cpu_to_be64(pte[1]);
-	copy_to_user((void __user *)pteg, pte, sizeof(pte));
+	ret = H_FUNCTION;
+	if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
+		goto done;
 	ret = H_SUCCESS;
 
 done:
@@ -244,36 +262,37 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
 	return EMULATE_DONE;
 }
 
-static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
+static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
 {
-	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
-	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
-	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
 	long rc;
 
-	rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
+	rc = kvmppc_h_logical_ci_load(vcpu);
 	if (rc == H_TOO_HARD)
 		return EMULATE_FAIL;
 	kvmppc_set_gpr(vcpu, 3, rc);
 	return EMULATE_DONE;
 }
 
-static int kvmppc_h_pr_logical_ci_load(struct kvm_vcpu *vcpu)
+static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
 {
 	long rc;
 
-	rc = kvmppc_h_logical_ci_load(vcpu);
+	rc = kvmppc_h_logical_ci_store(vcpu);
 	if (rc == H_TOO_HARD)
 		return EMULATE_FAIL;
 	kvmppc_set_gpr(vcpu, 3, rc);
 	return EMULATE_DONE;
 }
 
-static int kvmppc_h_pr_logical_ci_store(struct kvm_vcpu *vcpu)
+#ifdef CONFIG_SPAPR_TCE_IOMMU
+static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
 {
+	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
+	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
+	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
 	long rc;
 
-	rc = kvmppc_h_logical_ci_store(vcpu);
+	rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
 	if (rc == H_TOO_HARD)
 		return EMULATE_FAIL;
 	kvmppc_set_gpr(vcpu, 3, rc);
@@ -311,6 +330,23 @@ static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
 	return EMULATE_DONE;
 }
 
+#else /* CONFIG_SPAPR_TCE_IOMMU */
+static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
+{
+	return EMULATE_FAIL;
+}
+
+static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
+{
+	return EMULATE_FAIL;
+}
+
+static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
+{
+	return EMULATE_FAIL;
+}
+#endif /* CONFIG_SPAPR_TCE_IOMMU */
+
 static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
 {
 	long rc = kvmppc_xics_hcall(vcpu, cmd);
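
Every copy-related hunk above applies the same fix: copy_from_user()/copy_to_user() return the number of bytes left uncopied, so a non-zero result now fails the hcall with H_FUNCTION (via "goto done" under hpt_mutex, or "break" inside the bulk-remove loop) instead of being ignored. Below is a minimal standalone C sketch of that control flow; fake_copy_from_user() and its failure switch are illustrative stand-ins for the kernel helpers, not the kernel implementation itself.

#include <stdio.h>
#include <string.h>

/* Illustrative stand-ins for the hcall status codes
 * (in the kernel, H_SUCCESS is 0 and H_FUNCTION is -2). */
#define H_SUCCESS	0
#define H_FUNCTION	-2

/* Stand-in for copy_from_user(): returns the number of bytes NOT
 * copied, i.e. 0 on success, matching the kernel helper's contract. */
static unsigned long fake_copy_from_user(void *dst, const void *src,
					 unsigned long n, int should_fail)
{
	if (should_fail)
		return n;		/* nothing was copied */
	memcpy(dst, src, n);
	return 0;
}

/* Mirrors the shape of kvmppc_h_pr_remove() after the patch:
 * assume failure before the copy, jump to the common exit on error. */
static long handle_hcall(int fail)
{
	unsigned long src[2] = { 1, 2 };
	unsigned long pte[2];
	long ret;

	ret = H_FUNCTION;
	if (fake_copy_from_user(pte, src, sizeof(pte), fail))
		goto done;

	/* ... validate flags, update the PTE, invalidate the TLB ... */
	ret = H_SUCCESS;
done:
	return ret;
}

int main(void)
{
	printf("copy ok:     ret = %ld\n", handle_hcall(0));	/* 0  (H_SUCCESS)  */
	printf("copy failed: ret = %ld\n", handle_hcall(1));	/* -2 (H_FUNCTION) */
	return 0;
}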
