author		Joerg Roedel <jroedel@suse.de>	2019-05-06 08:24:18 -0400
committer	Joerg Roedel <jroedel@suse.de>	2019-05-06 08:25:57 -0400
commit		89736a0ee81d14439d085c8d4653bc1d86fe64d8 (patch)
tree		d74a05a2b111c8aff2dab35ca2a6a438baf01cd8
parent		1a1079011da32db87e19fcb39e70d082f89da921 (diff)
Revert "iommu/amd: Remove the leftover of bypass support"
This reverts commit 7a5dbf3ab2f04905cf8468c66fcdbfb643068bcb.

That commit not only removes the leftovers of bypass support, it also
removes most of the checks on the return value of the get_domain()
function. This can lead to silent data corruption bugs when a device is
not attached to its dma_ops domain and a DMA-API function is called for
that device.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--	drivers/iommu/amd_iommu.c	80
1 file changed, 63 insertions(+), 17 deletions(-)
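Before the diff itself, here is a minimal, self-contained userspace sketch of the error-handling pattern this revert restores around get_domain(): treat an ERR_PTR of -EINVAL as "no translation for this device" (the bypass path, which hands the physical address back unchanged), and any other error pointer as a mapping failure. The ERR_PTR/IS_ERR/PTR_ERR helpers are re-implemented here only so the example compiles standalone, and get_domain_stub()/map_page_like() are invented stand-ins for illustration; none of this is the kernel's code.

/*
 * Userspace sketch of the check restored in map_page() below.
 * ERR_PTR/IS_ERR/PTR_ERR mimic <linux/err.h>; get_domain_stub() and
 * map_page_like() are hypothetical stand-ins for get_domain()/map_page().
 */
#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO		4095
#define DMA_MAPPING_ERROR	((uint64_t)-1)

static inline void *ERR_PTR(long error)      { return (void *)error; }
static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int   IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct protection_domain { int id; };

/* Stand-in for get_domain(): a valid domain, -EINVAL for a bypassed
 * device, or another error pointer on hard failure. */
static struct protection_domain *get_domain_stub(int mode)
{
	static struct protection_domain dom = { .id = 1 };

	if (mode == 0)
		return &dom;              /* attached to a dma_ops domain */
	if (mode == 1)
		return ERR_PTR(-EINVAL);  /* bypass: no translation       */
	return ERR_PTR(-EBUSY);           /* any other error              */
}

/* Mirrors the restored logic: bypass returns paddr unchanged, other
 * errors return DMA_MAPPING_ERROR instead of dereferencing an error
 * pointer as if it were a domain. */
static uint64_t map_page_like(int mode, uint64_t paddr)
{
	struct protection_domain *domain = get_domain_stub(mode);

	if (PTR_ERR(domain) == -EINVAL)
		return paddr;
	else if (IS_ERR(domain))
		return DMA_MAPPING_ERROR;

	return paddr + 0x1000;            /* pretend an IOVA was allocated */
}

int main(void)
{
	printf("attached: 0x%llx\n", (unsigned long long)map_page_like(0, 0x2000));
	printf("bypass:   0x%llx\n", (unsigned long long)map_page_like(1, 0x2000));
	printf("error:    0x%llx\n", (unsigned long long)map_page_like(2, 0x2000));
	return 0;
}

Without such checks, an error pointer would be passed straight to to_dma_ops_domain() and used for DMA mappings, which is the silent-corruption scenario the commit message describes.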
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index bc98de5fa867..23c1a7eebb06 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -2459,10 +2459,20 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 			   unsigned long attrs)
 {
 	phys_addr_t paddr = page_to_phys(page) + offset;
-	struct protection_domain *domain = get_domain(dev);
-	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
+	struct protection_domain *domain;
+	struct dma_ops_domain *dma_dom;
+	u64 dma_mask;
+
+	domain = get_domain(dev);
+	if (PTR_ERR(domain) == -EINVAL)
+		return (dma_addr_t)paddr;
+	else if (IS_ERR(domain))
+		return DMA_MAPPING_ERROR;
+
+	dma_mask = *dev->dma_mask;
+	dma_dom  = to_dma_ops_domain(domain);
 
-	return __map_single(dev, dma_dom, paddr, size, dir, *dev->dma_mask);
+	return __map_single(dev, dma_dom, paddr, size, dir, dma_mask);
 }
 
 /*
@@ -2471,8 +2481,14 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		       enum dma_data_direction dir, unsigned long attrs)
 {
-	struct protection_domain *domain = get_domain(dev);
-	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
+	struct protection_domain *domain;
+	struct dma_ops_domain *dma_dom;
+
+	domain = get_domain(dev);
+	if (IS_ERR(domain))
+		return;
+
+	dma_dom = to_dma_ops_domain(domain);
 
 	__unmap_single(dma_dom, dma_addr, size, dir);
 }
@@ -2512,13 +2528,20 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 		  unsigned long attrs)
 {
 	int mapped_pages = 0, npages = 0, prot = 0, i;
-	struct protection_domain *domain = get_domain(dev);
-	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
+	struct protection_domain *domain;
+	struct dma_ops_domain *dma_dom;
 	struct scatterlist *s;
 	unsigned long address;
-	u64 dma_mask = *dev->dma_mask;
+	u64 dma_mask;
 	int ret;
 
+	domain = get_domain(dev);
+	if (IS_ERR(domain))
+		return 0;
+
+	dma_dom  = to_dma_ops_domain(domain);
+	dma_mask = *dev->dma_mask;
+
 	npages = sg_num_pages(dev, sglist, nelems);
 
 	address = dma_ops_alloc_iova(dev, dma_dom, npages, dma_mask);
@@ -2592,11 +2615,20 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		     int nelems, enum dma_data_direction dir,
 		     unsigned long attrs)
 {
-	struct protection_domain *domain = get_domain(dev);
-	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
+	struct protection_domain *domain;
+	struct dma_ops_domain *dma_dom;
+	unsigned long startaddr;
+	int npages = 2;
+
+	domain = get_domain(dev);
+	if (IS_ERR(domain))
+		return;
+
+	startaddr = sg_dma_address(sglist) & PAGE_MASK;
+	dma_dom   = to_dma_ops_domain(domain);
+	npages    = sg_num_pages(dev, sglist, nelems);
 
-	__unmap_single(dma_dom, sg_dma_address(sglist) & PAGE_MASK,
-		       sg_num_pages(dev, sglist, nelems) << PAGE_SHIFT, dir);
+	__unmap_single(dma_dom, startaddr, npages << PAGE_SHIFT, dir);
 }
 
 /*
@@ -2607,11 +2639,16 @@ static void *alloc_coherent(struct device *dev, size_t size,
 			   unsigned long attrs)
 {
 	u64 dma_mask = dev->coherent_dma_mask;
-	struct protection_domain *domain = get_domain(dev);
+	struct protection_domain *domain;
 	struct dma_ops_domain *dma_dom;
 	struct page *page;
 
-	if (IS_ERR(domain))
+	domain = get_domain(dev);
+	if (PTR_ERR(domain) == -EINVAL) {
+		page = alloc_pages(flag, get_order(size));
+		*dma_addr = page_to_phys(page);
+		return page_address(page);
+	} else if (IS_ERR(domain))
 		return NULL;
 
 	dma_dom = to_dma_ops_domain(domain);
@@ -2657,13 +2694,22 @@ static void free_coherent(struct device *dev, size_t size,
 			  void *virt_addr, dma_addr_t dma_addr,
 			  unsigned long attrs)
 {
-	struct protection_domain *domain = get_domain(dev);
-	struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
-	struct page *page = virt_to_page(virt_addr);
+	struct protection_domain *domain;
+	struct dma_ops_domain *dma_dom;
+	struct page *page;
 
+	page = virt_to_page(virt_addr);
 	size = PAGE_ALIGN(size);
 
+	domain = get_domain(dev);
+	if (IS_ERR(domain))
+		goto free_mem;
+
+	dma_dom = to_dma_ops_domain(domain);
+
 	__unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
+
+free_mem:
 	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
 		__free_pages(page, get_order(size));
 }