author	David Woodhouse <David.Woodhouse@intel.com>	2014-03-09 19:14:00 -0400
committer	David Woodhouse <David.Woodhouse@intel.com>	2014-03-24 10:08:00 -0400
commit	5040a918bd109a1903e7babac817325620939fa4 (patch)
tree	6a7a0d118b6b225ebbc0359c2e676fd401ba9322 /drivers/iommu/intel-iommu.c
parent	207e35920d2a6ee1d2f48fcd6bb34b42246192d1 (diff)
iommu/vt-d: Rename 'hwdev' variables to 'dev' now that that's the norm
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
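
For context: the functions touched below are the driver's DMA API callbacks. They receive a bare struct device * from the generic DMA layer, which is why the generic name 'dev' reads better than 'hwdev' now that the driver no longer deals only in pci_dev. As a rough sketch (not part of this diff; the member list is reconstructed from the same era of intel-iommu.c and may not be exact), they are wired up through a dma_map_ops table:

struct dma_map_ops intel_dma_ops = {
	.alloc = intel_alloc_coherent,
	.free = intel_free_coherent,
	.map_sg = intel_map_sg,
	.unmap_sg = intel_unmap_sg,
	.map_page = intel_map_page,
	.unmap_page = intel_unmap_page,
	.mapping_error = intel_mapping_error,
};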
Diffstat (limited to 'drivers/iommu/intel-iommu.c')
-rw-r--r--	drivers/iommu/intel-iommu.c | 44 ++++++++++++++++++++++----------------------
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2fe55bb6437f..60f8ceeb06e4 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2955,7 +2955,7 @@ static int iommu_no_mapping(struct device *dev)
 	return 0;
 }
 
-static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
+static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
 				     size_t size, int dir, u64 dma_mask)
 {
 	struct dmar_domain *domain;
@@ -2968,17 +2968,17 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (iommu_no_mapping(hwdev))
+	if (iommu_no_mapping(dev))
 		return paddr;
 
-	domain = get_valid_domain_for_dev(hwdev);
+	domain = get_valid_domain_for_dev(dev);
 	if (!domain)
 		return 0;
 
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
 
-	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
+	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
 	if (!iova)
 		goto error;
 
@@ -3016,7 +3016,7 @@ error:
 	if (iova)
 		__free_iova(&domain->iovad, iova);
 	printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
-		dev_name(hwdev), size, (unsigned long long)paddr, dir);
+		dev_name(dev), size, (unsigned long long)paddr, dir);
 	return 0;
 }
 
@@ -3155,7 +3155,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	}
 }
 
-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
+static void *intel_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  struct dma_attrs *attrs)
 {
@@ -3165,10 +3165,10 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	if (!iommu_no_mapping(hwdev))
+	if (!iommu_no_mapping(dev))
 		flags &= ~(GFP_DMA | GFP_DMA32);
-	else if (hwdev->coherent_dma_mask < dma_get_required_mask(hwdev)) {
-		if (hwdev->coherent_dma_mask < DMA_BIT_MASK(32))
+	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
 			flags |= GFP_DMA;
 		else
 			flags |= GFP_DMA32;
@@ -3179,16 +3179,16 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
 		return NULL;
 	memset(vaddr, 0, size);
 
-	*dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size,
+	*dma_handle = __intel_map_single(dev, virt_to_bus(vaddr), size,
 					 DMA_BIDIRECTIONAL,
-					 hwdev->coherent_dma_mask);
+					 dev->coherent_dma_mask);
 	if (*dma_handle)
 		return vaddr;
 	free_pages((unsigned long)vaddr, order);
 	return NULL;
 }
 
-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 				dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	int order;
@@ -3196,11 +3196,11 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
+	intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long)vaddr, order);
 }
 
-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
+static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			   int nelems, enum dma_data_direction dir,
 			   struct dma_attrs *attrs)
 {
@@ -3210,10 +3210,10 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	struct intel_iommu *iommu;
 	struct page *freelist;
 
-	if (iommu_no_mapping(hwdev))
+	if (iommu_no_mapping(dev))
 		return;
 
-	domain = find_domain(hwdev);
+	domain = find_domain(dev);
 	BUG_ON(!domain);
 
 	iommu = domain_get_iommu(domain);
@@ -3257,7 +3257,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 	return nelems;
 }
 
-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
+static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
 			enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	int i;
@@ -3271,10 +3271,10 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
-	if (iommu_no_mapping(hwdev))
-		return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
+	if (iommu_no_mapping(dev))
+		return intel_nontranslate_map_sg(dev, sglist, nelems, dir);
 
-	domain = get_valid_domain_for_dev(hwdev);
+	domain = get_valid_domain_for_dev(dev);
 	if (!domain)
 		return 0;
 
@@ -3283,8 +3283,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	for_each_sg(sglist, sg, nelems, i)
 		size += aligned_nrpages(sg->offset, sg->length);
 
-	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
-				*hwdev->dma_mask);
+	iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
+				*dev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;
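
Drivers never call these functions directly; they reach them through the generic DMA API, which dispatches via the device's dma_map_ops. A minimal, hypothetical caller (example_map, buf and len are invented for illustration), showing the bare struct device * that ends up as 'dev' in the code above:

#include <linux/dma-mapping.h>

/* Hypothetical driver snippet: map a buffer for device DMA. With the
 * Intel IOMMU active for this device, dma_map_single() lands in
 * intel_map_page() and hence __intel_map_single() above. */
static int example_map(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program 'handle' into the hardware and run the DMA ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}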