Diffstat (limited to 'arch/powerpc/kvm/book3s_64_vio_hv.c')
 arch/powerpc/kvm/book3s_64_vio_hv.c | 39 +++++++++++++++++++++++++++++++--------
 1 file changed, 31 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 506a4d400458..6821ead4b4eb 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -187,12 +187,35 @@ long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
 EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-static void kvmppc_rm_clear_tce(struct iommu_table *tbl, unsigned long entry)
+static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
+		unsigned long entry, unsigned long *hpa,
+		enum dma_data_direction *direction)
+{
+	long ret;
+
+	ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);
+
+	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
+			(*direction == DMA_BIDIRECTIONAL))) {
+		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RM(tbl, entry);
+		/*
+		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
+		 * calling this, so we can still find a valid UA here.
+		 */
+		if (pua && *pua)
+			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
+	}
+
+	return ret;
+}
+
+static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
+		unsigned long entry)
 {
 	unsigned long hpa = 0;
 	enum dma_data_direction dir = DMA_NONE;
 
-	iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+	iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 }
 
 static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
@@ -224,7 +247,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
 	unsigned long hpa = 0;
 	long ret;
 
-	if (iommu_tce_xchg_rm(tbl, entry, &hpa, &dir))
+	if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
 		/*
 		 * real mode xchg can fail if struct page crosses
 		 * a page boundary
@@ -236,7 +259,7 @@ static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
 
 	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
 	if (ret)
-		iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+		iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 
 	return ret;
 }
@@ -282,7 +305,7 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
 	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
 		return H_CLOSED;
 
-	ret = iommu_tce_xchg_rm(tbl, entry, &hpa, &dir);
+	ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
 	if (ret) {
 		mm_iommu_mapped_dec(mem);
 		/*
@@ -371,7 +394,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			return ret;
 
 		WARN_ON_ONCE_RM(1);
-		kvmppc_rm_clear_tce(stit->tbl, entry);
+		kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
 	}
 
 	kvmppc_tce_put(stt, entry, tce);
@@ -520,7 +543,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
 				goto unlock_exit;
 
 			WARN_ON_ONCE_RM(1);
-			kvmppc_rm_clear_tce(stit->tbl, entry);
+			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
 		}
 
 		kvmppc_tce_put(stt, entry + i, tce);
@@ -571,7 +594,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
 				return ret;
 
 			WARN_ON_ONCE_RM(1);
-			kvmppc_rm_clear_tce(stit->tbl, entry);
+			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
 		}
 	}
 
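Taken together, the hunks route every real-mode TCE exchange through the new local iommu_tce_xchg_rm() wrapper, which now receives the guest's mm_struct so that pages the device may have written to (old direction DMA_FROM_DEVICE or DMA_BIDIRECTIONAL) are marked dirty via mm_iommu_ua_mark_dirty_rm() using the cached userspace address. Below is a minimal standalone sketch of that pattern, not kernel code: every demo_* name is invented for illustration, and only the control flow (exchange first, then dirty the old mapping's cached UA when the old direction allowed device writes) mirrors the wrapper in the diff.

/*
 * Standalone sketch, NOT kernel code: demo_table, demo_exchange() and
 * demo_xchg_and_mark() are invented names. Only the control flow
 * mirrors iommu_tce_xchg_rm() above: exchange the entry first, then
 * mark the cached userspace address dirty when the *old* mapping's
 * direction meant the device could have written to the page.
 */
#include <stdio.h>
#include <stdint.h>

enum demo_dma_dir { DEMO_DMA_NONE, DEMO_DMA_TO_DEVICE,
		    DEMO_DMA_FROM_DEVICE, DEMO_DMA_BIDIRECTIONAL };

struct demo_table {
	uint64_t hpa[4];		/* host "physical" address per entry */
	enum demo_dma_dir dir[4];
	uint64_t ua[4];			/* cached userspace address, 0 = none */
	int dirty[4];			/* stands in for the page dirty bit */
};

/* Swap in a new mapping and report the previous one through hpa/dir. */
static int demo_exchange(struct demo_table *tbl, unsigned long entry,
			 uint64_t *hpa, enum demo_dma_dir *dir)
{
	uint64_t old_hpa = tbl->hpa[entry];
	enum demo_dma_dir old_dir = tbl->dir[entry];

	tbl->hpa[entry] = *hpa;
	tbl->dir[entry] = *dir;
	*hpa = old_hpa;
	*dir = old_dir;
	return 0;
}

/*
 * Same shape as iommu_tce_xchg_rm(): after a successful exchange,
 * *dir holds the old direction; if the device could have written to
 * the old page, dirty the cached UA (mm_iommu_ua_mark_dirty_rm()
 * analogue).
 */
static int demo_xchg_and_mark(struct demo_table *tbl, unsigned long entry,
			      uint64_t *hpa, enum demo_dma_dir *dir)
{
	int ret = demo_exchange(tbl, entry, hpa, dir);

	if (!ret && (*dir == DEMO_DMA_FROM_DEVICE ||
		     *dir == DEMO_DMA_BIDIRECTIONAL)) {
		uint64_t ua = tbl->ua[entry];

		if (ua)
			tbl->dirty[entry] = 1;
	}
	return ret;
}

int main(void)
{
	struct demo_table tbl = { 0 };
	uint64_t hpa;
	enum demo_dma_dir dir;

	/* Map entry 0 for device writes, with a cached UA. */
	hpa = 0x1000;
	dir = DEMO_DMA_FROM_DEVICE;
	tbl.ua[0] = 0xdead0000;
	demo_xchg_and_mark(&tbl, 0, &hpa, &dir);

	/* Clear it, as kvmppc_rm_clear_tce() does: old dir triggers dirtying. */
	hpa = 0;
	dir = DEMO_DMA_NONE;
	demo_xchg_and_mark(&tbl, 0, &hpa, &dir);

	printf("entry 0 dirty: %d\n", tbl.dirty[0]);	/* prints 1 */
	return 0;
}

Putting the dirty marking inside the wrapper rather than in each caller means the map path, the unmap path and kvmppc_rm_clear_tce() all pick it up through the single signature change, which is why every hunk after the first only threads kvm->mm (or vcpu->kvm) through to the existing call sites.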