author     Jiang Liu <jiang.liu@linux.intel.com>    2014-07-11 02:19:34 -0400
committer  Joerg Roedel <jroedel@suse.de>           2014-07-23 10:04:47 -0400
commit     d41a4adb1b9208a0ee59e6b4752853c1dd27cc2c (patch)
tree       e350f9922d61abc42fafa1da620c02425549d6df /drivers/iommu
parent     2a41ccee2fdc39df5bdcf5819ed3c0b4a62aea43 (diff)
iommu/vt-d: Simplify intel_unmap_sg() and kill duplicated code
Introduce intel_unmap() to reduce duplicated code in intel_unmap_sg()
and intel_unmap_page().

Also let dma_pte_free_pagetable() call dma_pte_clear_range() directly,
so callers only need to call dma_pte_free_pagetable().
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
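
To make the shape of the refactor explicit, here is a minimal sketch of the resulting call structure, condensed from the hunks below (bodies elided; not a standalone build, and the comments summarizing the elided body are paraphrase, not the in-tree text):

	/* Sketch only: condensed from the diff below. */

	static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
	{
		/* former intel_unmap_page() body: iova lookup, domain_unmap(),
		 * then either an immediate IOTLB flush or a deferred add_unmap() */
	}

	static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
				     size_t size, enum dma_data_direction dir,
				     struct dma_attrs *attrs)
	{
		intel_unmap(dev, dev_addr);	/* size/dir/attrs are not needed */
	}

	static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
				   int nelems, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
	{
		/* the scatterlist was mapped as one iova range, so the first
		 * dma_address identifies the whole region to unmap */
		intel_unmap(dev, sglist[0].dma_address);
	}

intel_free_coherent() likewise switches from intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL) to intel_unmap(dev, dma_handle), as shown in the hunk at line 3250.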
Diffstat (limited to 'drivers/iommu')
-rw-r--r--    drivers/iommu/intel-iommu.c    70
1 file changed, 19 insertions(+), 51 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0ba078bc0f32..0852b7021e4a 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -984,6 +984,8 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
 	BUG_ON(start_pfn > last_pfn);
 
+	dma_pte_clear_range(domain, start_pfn, last_pfn);
+
 	/* We don't need lock here; nobody else touches the iova range */
 	dma_pte_free_level(domain, agaw_to_level(domain->agaw),
 			   domain->pgd, 0, start_pfn, last_pfn);
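
With dma_pte_clear_range() now called from inside dma_pte_free_pagetable(), call sites that used to clear and then free can drop the first call. A hedged before/after sketch, using the error path of intel_map_sg() from the last hunk of this diff as the example:

	/* before this patch: clear the PTEs, then free the page tables */
	dma_pte_clear_range(domain, start_vpfn, start_vpfn + size - 1);
	dma_pte_free_pagetable(domain, start_vpfn, start_vpfn + size - 1);

	/* after this patch: one call clears the range and frees the tables */
	dma_pte_free_pagetable(domain, start_vpfn, start_vpfn + size - 1);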
@@ -2011,12 +2013,14 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 		/* It is large page*/
 		if (largepage_lvl > 1) {
 			pteval |= DMA_PTE_LARGE_PAGE;
-			/* Ensure that old small page tables are removed to make room
-			   for superpage, if they exist. */
-			dma_pte_clear_range(domain, iov_pfn,
-					    iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
+			lvl_pages = lvl_to_nr_pages(largepage_lvl);
+			/*
+			 * Ensure that old small page tables are
+			 * removed to make room for superpage,
+			 * if they exist.
+			 */
 			dma_pte_free_pagetable(domain, iov_pfn,
-					       iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1);
+					       iov_pfn + lvl_pages - 1);
 		} else {
 			pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
 		}
@@ -3148,9 +3152,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *f
 	spin_unlock_irqrestore(&async_umap_flush_lock, flags);
 }
 
-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
-			     size_t size, enum dma_data_direction dir,
-			     struct dma_attrs *attrs)
+static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
 {
 	struct dmar_domain *domain;
 	unsigned long start_pfn, last_pfn;
@@ -3194,6 +3196,13 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	}
 }
 
+static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
+			     size_t size, enum dma_data_direction dir,
+			     struct dma_attrs *attrs)
+{
+	intel_unmap(dev, dev_addr);
+}
+
 static void *intel_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  struct dma_attrs *attrs)
@@ -3250,7 +3259,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
 
-	intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
+	intel_unmap(dev, dma_handle);
 	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
 		__free_pages(page, order);
 }
@@ -3259,43 +3268,7 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
 			   int nelems, enum dma_data_direction dir,
 			   struct dma_attrs *attrs)
 {
-	struct dmar_domain *domain;
-	unsigned long start_pfn, last_pfn;
-	struct iova *iova;
-	struct intel_iommu *iommu;
-	struct page *freelist;
-
-	if (iommu_no_mapping(dev))
-		return;
-
-	domain = find_domain(dev);
-	BUG_ON(!domain);
-
-	iommu = domain_get_iommu(domain);
-
-	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
-	if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
-		      (unsigned long long)sglist[0].dma_address))
-		return;
-
-	start_pfn = mm_to_dma_pfn(iova->pfn_lo);
-	last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
-
-	freelist = domain_unmap(domain, start_pfn, last_pfn);
-
-	if (intel_iommu_strict) {
-		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-				      last_pfn - start_pfn + 1, !freelist, 0);
-		/* free iova */
-		__free_iova(&domain->iovad, iova);
-		dma_free_pagelist(freelist);
-	} else {
-		add_unmap(domain, iova, freelist);
-		/*
-		 * queue up the release of the unmap to save the 1/6th of the
-		 * cpu used up by the iotlb flush operation...
-		 */
-	}
+	intel_unmap(dev, sglist[0].dma_address);
 }
 
 static int intel_nontranslate_map_sg(struct device *hddev,
@@ -3359,13 +3332,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
 
 	ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
 	if (unlikely(ret)) {
-		/* clear the page */
-		dma_pte_clear_range(domain, start_vpfn,
-				    start_vpfn + size - 1);
-		/* free page tables */
 		dma_pte_free_pagetable(domain, start_vpfn,
 				       start_vpfn + size - 1);
-		/* free iova */
 		__free_iova(&domain->iovad, iova);
 		return 0;
 	}
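
For reference, the body that intel_unmap() takes over is, by construction, the code deleted above from intel_unmap_sg() (which had been duplicated from intel_unmap_page()), with sglist[0].dma_address replaced by the dev_addr parameter. A reconstruction along those lines; the exact WARN_ONCE wording is an assumption, adjusted from "sglist" to "page":

	static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
	{
		struct dmar_domain *domain;
		unsigned long start_pfn, last_pfn;
		struct iova *iova;
		struct intel_iommu *iommu;
		struct page *freelist;

		if (iommu_no_mapping(dev))
			return;

		domain = find_domain(dev);
		BUG_ON(!domain);

		iommu = domain_get_iommu(domain);

		iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
		if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
			      (unsigned long long)dev_addr))
			return;

		start_pfn = mm_to_dma_pfn(iova->pfn_lo);
		last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;

		freelist = domain_unmap(domain, start_pfn, last_pfn);

		if (intel_iommu_strict) {
			iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
					      last_pfn - start_pfn + 1, !freelist, 0);
			/* free iova */
			__free_iova(&domain->iovad, iova);
			dma_free_pagelist(freelist);
		} else {
			add_unmap(domain, iova, freelist);
			/*
			 * queue up the release of the unmap to save the 1/6th of the
			 * cpu used up by the iotlb flush operation...
			 */
		}
	}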