author		Joerg Roedel <jroedel@suse.de>	2016-07-08 07:31:31 -0400
committer	Joerg Roedel <jroedel@suse.de>	2016-07-14 04:21:57 -0400
commit		b3311b061de2e51db683a67092546876839df532 (patch)
tree		0dbcb2bca12165ed2ebcda37098d3250a73d6472 /drivers/iommu/amd_iommu.c
parent		281e8ccbff172899a60579773e72ad63d58b3770 (diff)
iommu/amd: Use container_of to get dma_ops_domain
Deriving the dma_ops_domain with container_of() is better than
storing an extra back-pointer in struct protection_domain: the
priv pointer can now be removed from the struct entirely.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
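
For illustration only (not part of the patch): a minimal userspace
sketch of the container_of() pattern the new helper relies on. The
struct fields below are hypothetical placeholders; only the embedding
of protection_domain inside dma_ops_domain mirrors the driver.

/*
 * Standalone sketch (not driver code): container_of() recovers the
 * enclosing dma_ops_domain from a pointer to its embedded
 * protection_domain using the member offset alone, which is what
 * makes a stored priv back-pointer redundant.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct protection_domain {
	int flags;				/* placeholder field */
};

struct dma_ops_domain {
	unsigned long aperture_size;		/* placeholder field */
	struct protection_domain domain;	/* embedded, as in the driver */
};

static struct dma_ops_domain *to_dma_ops_domain(struct protection_domain *domain)
{
	return container_of(domain, struct dma_ops_domain, domain);
}

int main(void)
{
	struct dma_ops_domain dma_dom;
	struct protection_domain *pdom = &dma_dom.domain;

	/* Prints 1: the container is recovered without a back-pointer. */
	printf("%d\n", to_dma_ops_domain(pdom) == &dma_dom);
	return 0;
}

The kernel's container_of() additionally type-checks the member
pointer, but the pointer arithmetic is the same.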
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r--	drivers/iommu/amd_iommu.c | 36
1 file changed, 26 insertions(+), 10 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index d13a18633dce..fb43cc5857c7 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -231,6 +231,12 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
 	return container_of(dom, struct protection_domain, domain);
 }
 
+static struct dma_ops_domain* to_dma_ops_domain(struct protection_domain *domain)
+{
+	BUG_ON(domain->flags != PD_DMA_OPS_MASK);
+	return container_of(domain, struct dma_ops_domain, domain);
+}
+
 static struct iommu_dev_data *alloc_dev_data(u16 devid)
 {
 	struct iommu_dev_data *dev_data;
@@ -1670,7 +1676,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
 	dma_dom->domain.mode = PAGE_MODE_2_LEVEL;
 	dma_dom->domain.pt_root = (void *)get_zeroed_page(GFP_KERNEL);
 	dma_dom->domain.flags = PD_DMA_OPS_MASK;
-	dma_dom->domain.priv = dma_dom;
 	if (!dma_dom->domain.pt_root)
 		goto free_dma_dom;
 
@@ -2367,6 +2372,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 {
 	phys_addr_t paddr = page_to_phys(page) + offset;
 	struct protection_domain *domain;
+	struct dma_ops_domain *dma_dom;
 	u64 dma_mask;
 
 	domain = get_domain(dev);
@@ -2376,8 +2382,9 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 		return DMA_ERROR_CODE;
 
 	dma_mask = *dev->dma_mask;
+	dma_dom = to_dma_ops_domain(domain);
 
-	return __map_single(dev, domain->priv, paddr, size, dir, dma_mask);
+	return __map_single(dev, dma_dom, paddr, size, dir, dma_mask);
 }
 
 /*
@@ -2387,12 +2394,15 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	struct protection_domain *domain;
+	struct dma_ops_domain *dma_dom;
 
 	domain = get_domain(dev);
 	if (IS_ERR(domain))
 		return;
 
-	__unmap_single(domain->priv, dma_addr, size, dir);
+	dma_dom = to_dma_ops_domain(domain);
+
+	__unmap_single(dma_dom, dma_addr, size, dir);
 }
 
 static int sg_num_pages(struct device *dev,
@@ -2440,7 +2450,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 	if (IS_ERR(domain))
 		return 0;
 
-	dma_dom = domain->priv;
+	dma_dom = to_dma_ops_domain(domain);
 	dma_mask = *dev->dma_mask;
 
 	npages = sg_num_pages(dev, sglist, nelems);
@@ -2511,6 +2521,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		     struct dma_attrs *attrs)
 {
 	struct protection_domain *domain;
+	struct dma_ops_domain *dma_dom;
 	unsigned long startaddr;
 	int npages = 2;
 
@@ -2519,9 +2530,10 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		return;
 
 	startaddr = sg_dma_address(sglist) & PAGE_MASK;
+	dma_dom = to_dma_ops_domain(domain);
 	npages = sg_num_pages(dev, sglist, nelems);
 
-	__unmap_single(domain->priv, startaddr, npages << PAGE_SHIFT, dir);
+	__unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir);
 }
 
 /*
@@ -2533,6 +2545,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 {
 	u64 dma_mask = dev->coherent_dma_mask;
 	struct protection_domain *domain;
+	struct dma_ops_domain *dma_dom;
 	struct page *page;
 
 	domain = get_domain(dev);
@@ -2543,6 +2556,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 	} else if (IS_ERR(domain))
 		return NULL;
 
+	dma_dom = to_dma_ops_domain(domain);
 	size = PAGE_ALIGN(size);
 	dma_mask = dev->coherent_dma_mask;
 	flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -2562,7 +2576,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 	if (!dma_mask)
 		dma_mask = *dev->dma_mask;
 
-	*dma_addr = __map_single(dev, domain->priv, page_to_phys(page),
+	*dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
 				 size, DMA_BIDIRECTIONAL, dma_mask);
 
 	if (*dma_addr == DMA_ERROR_CODE)
@@ -2586,6 +2600,7 @@ static void free_coherent(struct device *dev, size_t size,
 			  struct dma_attrs *attrs)
 {
 	struct protection_domain *domain;
+	struct dma_ops_domain *dma_dom;
 	struct page *page;
 
 	page = virt_to_page(virt_addr);
@@ -2595,7 +2610,9 @@ static void free_coherent(struct device *dev, size_t size,
 	if (IS_ERR(domain))
 		goto free_mem;
 
-	__unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
+	dma_dom = to_dma_ops_domain(domain);
+
+	__unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
 
 free_mem:
 	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
@@ -2888,7 +2905,7 @@ static void amd_iommu_domain_free(struct iommu_domain *dom)
 		queue_flush_all();
 
 		/* Now release the domain */
-		dma_dom = domain->priv;
+		dma_dom = to_dma_ops_domain(domain);
 		dma_ops_domain_free(dma_dom);
 		break;
 	default:
@@ -3076,8 +3093,7 @@ static void amd_iommu_apply_dm_region(struct device *dev,
 					  struct iommu_domain *domain,
 					  struct iommu_dm_region *region)
 {
-	struct protection_domain *pdomain = to_pdomain(domain);
-	struct dma_ops_domain *dma_dom = pdomain->priv;
+	struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
 	unsigned long start, end;
 
 	start = IOVA_PFN(region->start);