path: root/drivers/iommu
author		Thomas Gleixner <tglx@linutronix.de>	2016-01-12 05:01:12 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2016-01-12 05:01:12 -0500
commit		1f16f116b01c110db20ab808562c8b8bc3ee3d6e (patch)
tree		44db563f64cf5f8d62af8f99a61e2b248c44ea3a /drivers/iommu
parent		03724ac3d48f8f0e3caf1d30fa134f8fd96c94e2 (diff)
parent		f9eccf24615672896dc13251410c3f2f33a14f95 (diff)
Merge branches 'clockevents/4.4-fixes' and 'clockevents/4.5-fixes' of http://git.linaro.org/people/daniel.lezcano/linux into timers/urgent
Pull in fixes from Daniel Lezcano:

 - Fix the vt8500 timer leading to a system lock up when dealing with a
   too small delta (Roman Volkov)

 - Select CLKSRC_MMIO when the fsl_ftm_timer is enabled with
   COMPILE_TEST (Daniel Lezcano)

 - Prevent building timers that use the 'iomem' API when the
   architecture does not have HAS_IOMEM set (Richard Weinberger)
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/amd_iommu_v2.c	20
-rw-r--r--	drivers/iommu/intel-iommu.c	4
-rw-r--r--	drivers/iommu/intel-svm.c	20
-rw-r--r--	drivers/iommu/iommu.c	2
4 files changed, 41 insertions, 5 deletions
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index d21d4edf7236..7caf2fa237f2 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -494,6 +494,22 @@ static void handle_fault_error(struct fault *fault)
 	}
 }
 
+static bool access_error(struct vm_area_struct *vma, struct fault *fault)
+{
+	unsigned long requested = 0;
+
+	if (fault->flags & PPR_FAULT_EXEC)
+		requested |= VM_EXEC;
+
+	if (fault->flags & PPR_FAULT_READ)
+		requested |= VM_READ;
+
+	if (fault->flags & PPR_FAULT_WRITE)
+		requested |= VM_WRITE;
+
+	return (requested & ~vma->vm_flags) != 0;
+}
+
 static void do_fault(struct work_struct *work)
 {
 	struct fault *fault = container_of(work, struct fault, work);
@@ -516,8 +532,8 @@ static void do_fault(struct work_struct *work)
 		goto out;
 	}
 
-	if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) {
-		/* handle_mm_fault would BUG_ON() */
+	/* Check if we have the right permissions on the vma */
+	if (access_error(vma, fault)) {
 		up_read(&mm->mmap_sem);
 		handle_fault_error(fault);
 		goto out;
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f1042daef9ad..ac7387686ddc 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2159,7 +2159,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 			sg_res = aligned_nrpages(sg->offset, sg->length);
 			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
 			sg->dma_length = sg->length;
-			pteval = (sg_phys(sg) & PAGE_MASK) | prot;
+			pteval = page_to_phys(sg_page(sg)) | prot;
 			phys_pfn = pteval >> VTD_PAGE_SHIFT;
 		}
 
@@ -3704,7 +3704,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 
 	for_each_sg(sglist, sg, nelems, i) {
 		BUG_ON(!sg_page(sg));
-		sg->dma_address = sg_phys(sg);
+		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
 		sg->dma_length = sg->length;
 	}
 	return nelems;
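Note on the sg_phys() changes above: sg_phys(sg) expands to page_to_phys(sg_page(sg)) + sg->offset, so the replacement lines are the open-coded form of that helper. A minimal userspace sketch of the arithmetic follows; the page-size constants and example addresses are made up for illustration and are not taken from this patch.

/* Standalone sketch of the address arithmetic; constants and values
 * are illustrative stand-ins, not the kernel's definitions. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t page_phys = 0x12345000;	/* stands in for page_to_phys(sg_page(sg)) */
	uint64_t offset    = 0x200;		/* stands in for sg->offset */

	/* sg_phys(sg) is page_to_phys(sg_page(sg)) + sg->offset */
	uint64_t sg_phys   = page_phys + offset;

	/* Old vs. new pteval source in __domain_mapping(): both are
	 * page aligned as long as the offset stays inside one page. */
	printf("(sg_phys & PAGE_MASK)         = %#llx\n",
	       (unsigned long long)(sg_phys & PAGE_MASK));
	printf("page_to_phys(sg_page(sg))     = %#llx\n",
	       (unsigned long long)page_phys);

	/* intel_nontranslate_map_sg() keeps the full byte address instead */
	printf("page_to_phys(..) + sg->offset = %#llx\n",
	       (unsigned long long)(page_phys + offset));
	return 0;
}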
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index c69e3f9ec958..50464833d0b8 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -484,6 +484,23 @@ struct page_req_dsc {
 };
 
 #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
+
+static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
+{
+	unsigned long requested = 0;
+
+	if (req->exe_req)
+		requested |= VM_EXEC;
+
+	if (req->rd_req)
+		requested |= VM_READ;
+
+	if (req->wr_req)
+		requested |= VM_WRITE;
+
+	return (requested & ~vma->vm_flags) != 0;
+}
+
 static irqreturn_t prq_event_thread(int irq, void *d)
 {
 	struct intel_iommu *iommu = d;
@@ -539,6 +556,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
 		if (!vma || address < vma->vm_start)
 			goto invalid;
 
+		if (access_error(vma, req))
+			goto invalid;
+
 		ret = handle_mm_fault(svm->mm, vma, address,
 				      req->wr_req ? FAULT_FLAG_WRITE : 0);
 		if (ret & VM_FAULT_ERROR)
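Both new access_error() helpers above follow the same pattern: build a VM_* permission mask from the fault or page-request flags, then reject the request if it asks for anything the VMA does not grant. Below is a compilable userspace sketch of just that mask check; the flag values and the fake structs are stand-ins, not the kernel's types.

/* Userspace illustration of the requested-vs-granted mask check used by
 * the access_error() helpers above; all types and flags are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

#define VM_READ		0x1UL
#define VM_WRITE	0x2UL
#define VM_EXEC		0x4UL

struct fake_vma {
	unsigned long vm_flags;		/* permissions the mapping grants */
};

struct fake_fault {
	bool rd_req, wr_req, exe_req;	/* what the faulting access needs */
};

/* Mirrors the shape of the kernel helpers: true means "access denied" */
static bool access_error(const struct fake_vma *vma,
			 const struct fake_fault *req)
{
	unsigned long requested = 0;

	if (req->exe_req)
		requested |= VM_EXEC;
	if (req->rd_req)
		requested |= VM_READ;
	if (req->wr_req)
		requested |= VM_WRITE;

	/* Any requested bit not present in vm_flags is an access error */
	return (requested & ~vma->vm_flags) != 0;
}

int main(void)
{
	struct fake_vma ro = { .vm_flags = VM_READ };
	struct fake_fault write_fault = { .wr_req = true };
	struct fake_fault read_fault = { .rd_req = true };

	printf("write to read-only vma -> error: %d\n",
	       access_error(&ro, &write_fault));	/* prints 1 */
	printf("read from read-only vma -> error: %d\n",
	       access_error(&ro, &read_fault));		/* prints 0 */
	return 0;
}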
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index abae363c7b9b..0e3b0092ec92 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1430,7 +1430,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
 	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
 
 	for_each_sg(sg, s, nents, i) {
-		phys_addr_t phys = sg_phys(s);
+		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
 
 		/*
 		 * We are mapping on IOMMU page boundaries, so offset within