author    Joerg Roedel <jroedel@suse.de>    2015-12-21 12:47:11 -0500
committer Joerg Roedel <jroedel@suse.de>    2015-12-28 11:18:53 -0500
commit    ab7032bb9c37f9d36ade2267a01a6edf8f2d41d7 (patch)
tree      ded1550fd1d4923e8d39a2b3fe24acad304103df /drivers/iommu
parent    2a87442c5b9858cbfc43eb17da4331551d578d25 (diff)
iommu/amd: Remove need_flush from struct dma_ops_domain
The flushing of IOMMU TLBs is now done on a per-range basis, so there is no longer any need for domain-wide flush tracking.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu.c  30
1 file changed, 6 insertions(+), 24 deletions(-)
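The sketch below is a standalone, user-space model of the per-range flush decision that remains after this patch; it is an illustration, not kernel code. Only amd_iommu_unmap_flush, next_bit and dma_ops_free_addresses() are taken from the patch itself; struct aperture_range_model, range_needs_flush() and unmap_flush_enabled are hypothetical names invented for the example, and it assumes (as the rest of this series suggests) that a wrap-around of the allocation pointer already flushes the IOTLB.

/*
 * Standalone model of the per-range flush decision; the "_model" struct
 * and the helpers below are hypothetical, not taken from amd_iommu.c.
 */
#include <stdbool.h>
#include <stdio.h>

static bool unmap_flush_enabled;	/* stand-in for amd_iommu_unmap_flush */

struct aperture_range_model {
	unsigned long next_bit;		/* allocation watermark of this range */
};

/* Mirrors the condition dma_ops_free_addresses() uses after this patch. */
static bool range_needs_flush(const struct aperture_range_model *range,
			      unsigned long address, unsigned long pages)
{
	/*
	 * A freed region that reaches past next_bit can be handed out again
	 * before the allocator wraps around (and wrap-around is assumed to
	 * flush the IOTLB), so it has to be flushed immediately; regions
	 * entirely behind next_bit are only reused after such a wrap.
	 */
	return unmap_flush_enabled || (address + pages > range->next_bit);
}

int main(void)
{
	struct aperture_range_model range = { .next_bit = 512 };

	printf("free behind watermark: flush=%d\n",
	       range_needs_flush(&range, 100, 16));	/* prints 0 */
	printf("free past watermark:   flush=%d\n",
	       range_needs_flush(&range, 600, 16));	/* prints 1 */
	return 0;
}

With that per-range check, plus the assumed flush-on-wrap in the allocator, the domain-wide need_flush flag carries no extra information, which is why the hunks below simply delete it.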
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index faf51a066e98..39a2048a6cd2 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -151,9 +151,6 @@ struct dma_ops_domain {
 
 	/* address space relevant data */
 	struct aperture_range *aperture[APERTURE_MAX_RANGES];
-
-	/* This will be set to true when TLB needs to be flushed */
-	bool need_flush;
 };
 
 /****************************************************************************
@@ -1563,7 +1560,7 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
 					unsigned long align_mask,
 					u64 dma_mask)
 {
-	unsigned long next_bit, boundary_size, mask;
+	unsigned long boundary_size, mask;
 	unsigned long address = -1;
 	int start = dom->next_index;
 	int i;
@@ -1581,8 +1578,6 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
 		if (!range || range->offset >= dma_mask)
 			continue;
 
-		next_bit = range->next_bit;
-
 		address = dma_ops_aperture_alloc(dom, range, pages,
 						 dma_mask, boundary_size,
 						 align_mask);
@@ -1591,9 +1586,6 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
 			dom->next_index = i;
 			break;
 		}
-
-		if (next_bit > range->next_bit)
-			dom->need_flush = true;
 	}
 
 	return address;
@@ -1609,7 +1601,6 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
 
 #ifdef CONFIG_IOMMU_STRESS
 	dom->next_index = 0;
-	dom->need_flush = true;
 #endif
 
 	address = dma_ops_area_alloc(dev, dom, pages, align_mask, dma_mask);
@@ -1642,7 +1633,8 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 		return;
 #endif
 
-	if (address + pages > range->next_bit) {
+	if (amd_iommu_unmap_flush ||
+	    (address + pages > range->next_bit)) {
 		domain_flush_tlb(&dom->domain);
 		domain_flush_complete(&dom->domain);
 	}
@@ -1868,8 +1860,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
 	if (!dma_dom->domain.pt_root)
 		goto free_dma_dom;
 
-	dma_dom->need_flush = false;
-
 	add_domain_to_list(&dma_dom->domain);
 
 	if (alloc_new_range(dma_dom, true, GFP_KERNEL))
@@ -2503,11 +2493,10 @@ retry:
 
 	ADD_STATS_COUNTER(alloced_io_mem, size);
 
-	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
-		domain_flush_tlb(&dma_dom->domain);
-		dma_dom->need_flush = false;
-	} else if (unlikely(amd_iommu_np_cache))
+	if (unlikely(amd_iommu_np_cache)) {
 		domain_flush_pages(&dma_dom->domain, address, size);
+		domain_flush_complete(&dma_dom->domain);
+	}
 
 out:
 	return address;
@@ -2519,8 +2508,6 @@ out_unmap:
 		dma_ops_domain_unmap(dma_dom, start);
 	}
 
-	domain_flush_pages(&dma_dom->domain, address, size);
-
 	dma_ops_free_addresses(dma_dom, address, pages);
 
 	return DMA_ERROR_CODE;
@@ -2553,11 +2540,6 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 		start += PAGE_SIZE;
 	}
 
-	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-		domain_flush_pages(&dma_dom->domain, flush_addr, size);
-		dma_dom->need_flush = false;
-	}
-
 	SUB_STATS_COUNTER(alloced_io_mem, size);
 
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);