Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--  drivers/iommu/intel-iommu.c | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)
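Every hunk below applies the same conversion: the local struct pci_dev lookup is dropped and the generic struct device is used directly, both for naming (dev_name() instead of pci_name()) and for reading the DMA mask, so these DMA paths no longer assume the caller is a PCI device. A minimal sketch of the pattern follows; it is illustrative only and not part of the patch, and the helper name report_mapping_failure is made up for the example:

#include <linux/device.h>
#include <linux/pci.h>
#include <linux/printk.h>

/* Hypothetical helper, for illustration only. */
static void report_mapping_failure(struct device *dev, size_t size)
{
	/*
	 * Old style: only valid when dev is backed by a PCI device.
	 *
	 *	struct pci_dev *pdev = to_pci_dev(dev);
	 *	pr_err("Device %s: %zx mapping failed\n", pci_name(pdev), size);
	 *	... pdev->dma_mask ...
	 */

	/*
	 * New style: dev_name() works for any struct device, and the DMA
	 * mask is reached through dev->dma_mask (a u64 *), so it must be
	 * dereferenced where pdev->dma_mask was read directly.
	 */
	pr_err("Device %s: %zx mapping failed, mask %llx\n",
	       dev_name(dev), size,
	       (unsigned long long)(dev->dma_mask ? *dev->dma_mask : 0));
}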
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index e18b93cc1224..2fe55bb6437f 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2835,7 +2835,6 @@ static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
 				     unsigned long nrpages, uint64_t dma_mask)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
 	struct iova *iova = NULL;
 
 	/* Restrict dma_mask to the width that the iommu can handle */
@@ -2855,7 +2854,7 @@ static struct iova *intel_alloc_iova(struct device *dev,
 	iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
 	if (unlikely(!iova)) {
 		printk(KERN_ERR "Allocating %ld-page iova for %s failed",
-		       nrpages, pci_name(pdev));
+		       nrpages, dev_name(dev));
 		return NULL;
 	}
 
@@ -2959,7 +2958,6 @@ static int iommu_no_mapping(struct device *dev)
 static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 				     size_t size, int dir, u64 dma_mask)
 {
-	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
 	phys_addr_t start_paddr;
 	struct iova *iova;
@@ -3018,7 +3016,7 @@ error:
 	if (iova)
 		__free_iova(&domain->iovad, iova);
 	printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
-	       pci_name(pdev), size, (unsigned long long)paddr, dir);
+	       dev_name(hwdev), size, (unsigned long long)paddr, dir);
 	return 0;
 }
 
@@ -3115,7 +3113,6 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 			     size_t size, enum dma_data_direction dir,
 			     struct dma_attrs *attrs)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
 	struct dmar_domain *domain;
 	unsigned long start_pfn, last_pfn;
 	struct iova *iova;
@@ -3139,7 +3136,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
 
 	pr_debug("Device %s unmapping: pfn %lx-%lx\n",
-		 pci_name(pdev), start_pfn, last_pfn);
+		 dev_name(dev), start_pfn, last_pfn);
 
 	freelist = domain_unmap(domain, start_pfn, last_pfn);
 
@@ -3264,7 +3261,6 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 			enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	int i;
-	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
 	size_t size = 0;
 	int prot = 0;
@@ -3288,7 +3284,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 		size += aligned_nrpages(sg->offset, sg->length);
 
 	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
-				pdev->dma_mask);
+				*hwdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;