author		Christoph Hellwig <hch@lst.de>	2019-04-10 12:50:14 -0400
committer	Joerg Roedel <jroedel@suse.de>	2019-04-11 11:37:21 -0400
commit		7a5dbf3ab2f04905cf8468c66fcdbfb643068bcb (patch)
tree		1bedb06bb178ee7ae09a0189416ff1122c5fdcea /drivers
parent		83d18bdff18f680ce2c0af10a663da19f7dede93 (diff)
iommu/amd: Remove the leftover of bypass support
The AMD iommu dma_ops are only attached on a per-device basis when an
actual translation is needed. Remove the leftover bypass support, which
in parts was already broken (e.g. it always returns 0 from ->map_sg).

Use the opportunity to remove a few local variables and move assignments
into the declaration line where they were previously separated by the
bypass check.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
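[Editorial note] As background for the remark that the old bypass "always returns 0 from ->map_sg": the DMA API defines a zero return from dma_map_sg() as failure, so a bypass implemented that way never worked for scatter-gather mappings. A minimal, hypothetical caller sketch illustrating this (example_map() is invented for illustration; dma_map_sg()/dma_unmap_sg() are the real API calls):

	#include <linux/dma-mapping.h>
	#include <linux/scatterlist.h>

	/* Hypothetical caller: a zero return from dma_map_sg() means "nothing
	 * was mapped", so an ->map_sg that unconditionally returns 0 looks
	 * like an allocation failure to every driver. */
	static int example_map(struct device *dev, struct scatterlist *sgl, int nents)
	{
		int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

		if (count == 0)
			return -ENOMEM;	/* treated as an error, not as a bypass */

		/* ... hand the mapped segments to the hardware ... */

		dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
		return 0;
	}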
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/iommu/amd_iommu.c	80
1 file changed, 17 insertions, 63 deletions
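[Editorial note] Every hunk below applies the same simplification. A condensed before/after sketch of the pattern (illustrative only; the exact changes are in the hunks that follow):

	/* Before: the bypass/error check kept declarations and assignments apart. */
	struct protection_domain *domain;
	struct dma_ops_domain *dma_dom;

	domain = get_domain(dev);
	if (IS_ERR(domain))
		return;
	dma_dom = to_dma_ops_domain(domain);

	/* After: dma_ops are only installed for devices that have a translation
	 * domain, so get_domain() needs no bypass check and the assignments can
	 * move into the declarations. */
	struct protection_domain *domain = get_domain(dev);
	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);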
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 7a0de274934c..f467cc4b498e 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2481,20 +2481,10 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 			   unsigned long attrs)
 {
 	phys_addr_t paddr = page_to_phys(page) + offset;
-	struct protection_domain *domain;
-	struct dma_ops_domain *dma_dom;
-	u64 dma_mask;
-
-	domain = get_domain(dev);
-	if (PTR_ERR(domain) == -EINVAL)
-		return (dma_addr_t)paddr;
-	else if (IS_ERR(domain))
-		return DMA_MAPPING_ERROR;
-
-	dma_mask = *dev->dma_mask;
-	dma_dom = to_dma_ops_domain(domain);
+	struct protection_domain *domain = get_domain(dev);
+	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
 
-	return __map_single(dev, dma_dom, paddr, size, dir, dma_mask);
+	return __map_single(dev, dma_dom, paddr, size, dir, *dev->dma_mask);
 }
 
 /*
@@ -2503,14 +2493,8 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		       enum dma_data_direction dir, unsigned long attrs)
 {
-	struct protection_domain *domain;
-	struct dma_ops_domain *dma_dom;
-
-	domain = get_domain(dev);
-	if (IS_ERR(domain))
-		return;
-
-	dma_dom = to_dma_ops_domain(domain);
+	struct protection_domain *domain = get_domain(dev);
+	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
 
 	__unmap_single(dma_dom, dma_addr, size, dir);
 }
@@ -2550,20 +2534,13 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 		  unsigned long attrs)
 {
 	int mapped_pages = 0, npages = 0, prot = 0, i;
-	struct protection_domain *domain;
-	struct dma_ops_domain *dma_dom;
+	struct protection_domain *domain = get_domain(dev);
+	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
 	struct scatterlist *s;
 	unsigned long address;
-	u64 dma_mask;
+	u64 dma_mask = *dev->dma_mask;
 	int ret;
 
-	domain = get_domain(dev);
-	if (IS_ERR(domain))
-		return 0;
-
-	dma_dom = to_dma_ops_domain(domain);
-	dma_mask = *dev->dma_mask;
-
 	npages = sg_num_pages(dev, sglist, nelems);
 
 	address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
@@ -2635,20 +2612,11 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		     int nelems, enum dma_data_direction dir,
 		     unsigned long attrs)
 {
-	struct protection_domain *domain;
-	struct dma_ops_domain *dma_dom;
-	unsigned long startaddr;
-	int npages = 2;
-
-	domain = get_domain(dev);
-	if (IS_ERR(domain))
-		return;
-
-	startaddr = sg_dma_address(sglist) & PAGE_MASK;
-	dma_dom = to_dma_ops_domain(domain);
-	npages = sg_num_pages(dev, sglist, nelems);
+	struct protection_domain *domain = get_domain(dev);
+	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
 
-	__unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir);
+	__unmap_single(dma_dom, sg_dma_address(sglist) & PAGE_MASK,
+		       sg_num_pages(dev, sglist, nelems) << PAGE_SHIFT, dir);
 }
 
 /*
2654/* 2622/*
@@ -2659,16 +2627,11 @@ static void *alloc_coherent(struct device *dev, size_t size,
 			    unsigned long attrs)
 {
 	u64 dma_mask = dev->coherent_dma_mask;
-	struct protection_domain *domain;
+	struct protection_domain *domain = get_domain(dev);
 	struct dma_ops_domain *dma_dom;
 	struct page *page;
 
-	domain = get_domain(dev);
-	if (PTR_ERR(domain) == -EINVAL) {
-		page = alloc_pages(flag, get_order(size));
-		*dma_addr = page_to_phys(page);
-		return page_address(page);
-	} else if (IS_ERR(domain))
+	if (IS_ERR(domain))
 		return NULL;
 
 	dma_dom = to_dma_ops_domain(domain);
@@ -2714,22 +2677,13 @@ static void free_coherent(struct device *dev, size_t size,
 			  void *virt_addr, dma_addr_t dma_addr,
 			  unsigned long attrs)
 {
-	struct protection_domain *domain;
-	struct dma_ops_domain *dma_dom;
-	struct page *page;
+	struct protection_domain *domain = get_domain(dev);
+	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
+	struct page *page = virt_to_page(virt_addr);
 
-	page = virt_to_page(virt_addr);
 	size = PAGE_ALIGN(size);
 
-	domain = get_domain(dev);
-	if (IS_ERR(domain))
-		goto free_mem;
-
-	dma_dom = to_dma_ops_domain(domain);
-
 	__unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
 
-
-free_mem:
 	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
 		__free_pages(page, get_order(size));
 }