author     David Woodhouse <David.Woodhouse@intel.com>   2015-10-13 12:18:10 -0400
committer  David Woodhouse <David.Woodhouse@intel.com>   2015-10-15 10:35:28 -0400
commit     0204a49609824163092c32a8aeb073f7e9acc76d
tree       acf951173aa656c2781694deb985e3a7f14b21ce /drivers/iommu/intel-svm.c
parent     a222a7f0bb6c94c31cc9c755110593656f19de89
iommu/vt-d: Add callback to device driver on page faults
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
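The callback is handed in through a struct svm_dev_ops pointer; the structure itself is not part of this diff. A minimal sketch, consistent with the new intel_svm_bind_mm() signature and with the fault_cb() invocation added to prq_event_thread() below, might look like the following (the argument types are inferred from the call site and are therefore assumptions):

struct svm_dev_ops {
	/*
	 * Called for each page request the IOMMU driver has finished
	 * processing: 'pasid' and 'address' identify the faulting access,
	 * 'private' carries the device-private data from the page request
	 * descriptor, 'rwxp' packs the read/write/execute/privileged
	 * request bits, and 'result' is the response code reported back
	 * for the page group.
	 */
	void (*fault_cb)(struct device *dev, int pasid, u64 address,
			 u32 private, int rwxp, int result);
};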
Diffstat (limited to 'drivers/iommu/intel-svm.c')
-rw-r--r--  drivers/iommu/intel-svm.c | 26
1 file changed, 25 insertions(+), 1 deletion(-)
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 0e8654282484..006e95dd64ae 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -264,7 +264,7 @@ static const struct mmu_notifier_ops intel_mmuops = {
 
 static DEFINE_MUTEX(pasid_mutex);
 
-int intel_svm_bind_mm(struct device *dev, int *pasid)
+int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
 {
 	struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
 	struct intel_svm_dev *sdev;
@@ -302,6 +302,10 @@ int intel_svm_bind_mm(struct device *dev, int *pasid)
 
 	list_for_each_entry(sdev, &svm->devs, list) {
 		if (dev == sdev->dev) {
+			if (sdev->ops != ops) {
+				ret = -EBUSY;
+				goto out;
+			}
 			sdev->users++;
 			goto success;
 		}
@@ -327,6 +331,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid)
 	}
 	/* Finish the setup now we know we're keeping it */
 	sdev->users = 1;
+	sdev->ops = ops;
 	init_rcu_head(&sdev->rcu);
 
 	if (!svm) {
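Taken together, the hunks above mean intel_svm_bind_mm() now accepts a flags word and the ops pointer, rejects a re-bind from the same device with a different ops pointer (-EBUSY), and records the pointer in the new sdev->ops field. A hypothetical caller, using the sketch of struct svm_dev_ops given earlier, might look like this (the my_* names are illustrative, the flags value of 0 is an assumption since no flag definitions appear in this diff, and the SVM declarations are assumed to come from the Intel IOMMU/SVM headers):

/* Illustrative only: my_* names are hypothetical. */
static void my_fault_cb(struct device *dev, int pasid, u64 address,
			u32 private, int rwxp, int result)
{
	dev_warn(dev, "page request: pasid %d addr 0x%llx rwxp 0x%x result %d\n",
		 pasid, (unsigned long long)address, rwxp, result);
}

static struct svm_dev_ops my_svm_ops = {
	.fault_cb = my_fault_cb,
};

static int my_enable_svm(struct device *dev)
{
	int pasid;

	/*
	 * The first bind stores &my_svm_ops in the new sdev->ops field and
	 * returns the PASID via the pointer.  Any later bind from the same
	 * device must pass the same ops pointer; a mismatch now fails with
	 * -EBUSY instead of silently sharing the existing binding.
	 */
	return intel_svm_bind_mm(dev, &pasid, 0, &my_svm_ops);
}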
@@ -456,6 +461,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
 	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
 	while (head != tail) {
+		struct intel_svm_dev *sdev;
 		struct vm_area_struct *vma;
 		struct page_req_dsc *req;
 		struct qi_desc resp;
@@ -507,6 +513,24 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		up_read(&svm->mm->mmap_sem);
 	bad_req:
 		/* Accounting for major/minor faults? */
+		rcu_read_lock();
+		list_for_each_entry_rcu(sdev, &svm->devs, list) {
+			if (sdev->sid == PCI_DEVID(req->bus, req->devfn))
+				break;
+		}
+		/* Other devices can go away, but the drivers are not permitted
+		 * to unbind while any page faults might be in flight. So it's
+		 * OK to drop the 'lock' here now we have it. */
+		rcu_read_unlock();
+
+		if (WARN_ON(&sdev->list == &svm->devs))
+			sdev = NULL;
+
+		if (sdev && sdev->ops && sdev->ops->fault_cb) {
+			int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
+				   (req->exe_req << 1) | (req->priv_req);
+			sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr, req->private, rwxp, result);
+		}
+
 		if (req->lpig) {
 			/* Page Group Response */
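For completeness, this is how a fault_cb implementation might unpack the rwxp argument, given the bit packing used in the hunk above (read at bit 3, write at bit 2, execute at bit 1, privileged at bit 0). The helper name is hypothetical and this is only a sketch, not part of the patch:

/* Hypothetical helper: decode the rwxp bits as packed in prq_event_thread(). */
static void my_report_fault(struct device *dev, int pasid, u64 address, int rwxp)
{
	dev_dbg(dev, "pasid %d fault at 0x%llx:%s%s%s%s\n",
		pasid, (unsigned long long)address,
		(rwxp & 0x8) ? " read" : "",
		(rwxp & 0x4) ? " write" : "",
		(rwxp & 0x2) ? " exec" : "",
		(rwxp & 0x1) ? " priv" : "");
}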