author		Joerg Roedel <jroedel@suse.de>	2016-07-05 08:23:01 -0400
committer	Joerg Roedel <jroedel@suse.de>	2016-07-13 06:46:05 -0400
commit		256e4621c21aa1bf704e1a12e643923fdb732d04 (patch)
tree		429cbe164c2139ee670da730d63ca69dbc209e85 /drivers/iommu/amd_iommu.c
parent		518d9b450387a3508363af58d1f62db9fc92d438 (diff)
iommu/amd: Make use of the generic IOVA allocator
Remove the old address allocation code and make use of the generic
IOVA allocator that is also used by other dma-ops implementations.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
-rw-r--r--	drivers/iommu/amd_iommu.c	183
1 file changed, 26 insertions(+), 157 deletions(-)
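For orientation before reading the diff: the two helpers it introduces, dma_ops_alloc_iova() and dma_ops_free_iova(), are thin wrappers around the generic IOVA allocator's alloc_iova_fast()/free_iova_fast(). Below is a minimal sketch of that pattern in kernel context; struct example_domain, example_alloc_iova() and example_free_iova() are hypothetical stand-ins, while the allocator calls, the power-of-two rounding, and the pfn/byte-address conversions mirror what the new helpers in the patch do.

#include <linux/iova.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/types.h>

/* Hypothetical stand-in for the driver's dma_ops_domain. */
struct example_domain {
	struct iova_domain iovad;	/* state of the generic IOVA allocator */
};

static unsigned long example_alloc_iova(struct example_domain *dom,
					unsigned int pages, u64 dma_mask)
{
	unsigned long pfn;

	/* The allocator's per-CPU cache works on power-of-two sizes. */
	pages = __roundup_pow_of_two(pages);

	/*
	 * Allocation is done in page-frame numbers; the limit is the
	 * highest pfn addressable under the device's DMA mask (this is
	 * what the driver's IOVA_PFN() macro computes in the patch).
	 */
	pfn = alloc_iova_fast(&dom->iovad, pages, dma_mask >> PAGE_SHIFT);

	/* A pfn of 0 means the allocation failed. */
	return pfn << PAGE_SHIFT;
}

static void example_free_iova(struct example_domain *dom,
			      unsigned long address, unsigned int pages)
{
	/* Free with the same rounded size that was allocated. */
	pages = __roundup_pow_of_two(pages);

	free_iova_fast(&dom->iovad, address >> PAGE_SHIFT, pages);
}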
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 1cd57a392558..77be2d0558bd 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1649,167 +1649,32 @@ out_free:
 	return -ENOMEM;
 }
 
-static dma_addr_t dma_ops_aperture_alloc(struct dma_ops_domain *dom,
-					  struct aperture_range *range,
-					  unsigned long pages,
-					  unsigned long dma_mask,
-					  unsigned long boundary_size,
-					  unsigned long align_mask,
-					  bool trylock)
-{
-	unsigned long offset, limit, flags;
-	dma_addr_t address;
-	bool flush = false;
-
-	offset = range->offset >> PAGE_SHIFT;
-	limit  = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
-					dma_mask >> PAGE_SHIFT);
-
-	if (trylock) {
-		if (!spin_trylock_irqsave(&range->bitmap_lock, flags))
-			return -1;
-	} else {
-		spin_lock_irqsave(&range->bitmap_lock, flags);
-	}
-
-	address = iommu_area_alloc(range->bitmap, limit, range->next_bit,
-				   pages, offset, boundary_size, align_mask);
-	if (address == -1) {
-		/* Nothing found, retry one time */
-		address = iommu_area_alloc(range->bitmap, limit,
-					   0, pages, offset, boundary_size,
-					   align_mask);
-		flush = true;
-	}
-
-	if (address != -1)
-		range->next_bit = address + pages;
-
-	spin_unlock_irqrestore(&range->bitmap_lock, flags);
-
-	if (flush) {
-		domain_flush_tlb(&dom->domain);
-		domain_flush_complete(&dom->domain);
-	}
-
-	return address;
-}
-
-static unsigned long dma_ops_area_alloc(struct device *dev,
-					struct dma_ops_domain *dom,
-					unsigned int pages,
-					unsigned long align_mask,
-					u64 dma_mask)
+static unsigned long dma_ops_alloc_iova(struct device *dev,
+					struct dma_ops_domain *dma_dom,
+					unsigned int pages, u64 dma_mask)
 {
-	unsigned long boundary_size, mask;
-	unsigned long address = -1;
-	bool first = true;
-	u32 start, i;
-
-	preempt_disable();
-
-	mask = dma_get_seg_boundary(dev);
-
-again:
-	start = this_cpu_read(*dom->next_index);
-
-	/* Sanity check - is it really necessary? */
-	if (unlikely(start > APERTURE_MAX_RANGES)) {
-		start = 0;
-		this_cpu_write(*dom->next_index, 0);
-	}
-
-	boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
-			1UL << (BITS_PER_LONG - PAGE_SHIFT);
+	unsigned long pfn = 0;
 
-	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
-		struct aperture_range *range;
-		int index;
-
-		index = (start + i) % APERTURE_MAX_RANGES;
+	pages = __roundup_pow_of_two(pages);
 
-		range = dom->aperture[index];
-
-		if (!range || range->offset >= dma_mask)
-			continue;
-
-		address = dma_ops_aperture_alloc(dom, range, pages,
-						 dma_mask, boundary_size,
-						 align_mask, first);
-		if (address != -1) {
-			address = range->offset + (address << PAGE_SHIFT);
-			this_cpu_write(*dom->next_index, index);
-			break;
-		}
-	}
-
-	if (address == -1 && first) {
-		first = false;
-		goto again;
-	}
+	if (dma_mask > DMA_BIT_MASK(32))
+		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
+				      IOVA_PFN(DMA_BIT_MASK(32)));
 
-	preempt_enable();
+	if (!pfn)
+		pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask));
 
-	return address;
+	return (pfn << PAGE_SHIFT);
 }
 
-static unsigned long dma_ops_alloc_addresses(struct device *dev,
-					     struct dma_ops_domain *dom,
-					     unsigned int pages,
-					     unsigned long align_mask,
-					     u64 dma_mask)
+static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
+			      unsigned long address,
+			      unsigned int pages)
 {
-	unsigned long address = -1;
-
-	while (address == -1) {
-		address = dma_ops_area_alloc(dev, dom, pages,
-					     align_mask, dma_mask);
-
-		if (address == -1 && alloc_new_range(dom, false, GFP_ATOMIC))
-			break;
-	}
-
-	if (unlikely(address == -1))
-		address = DMA_ERROR_CODE;
-
-	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
-
-	return address;
-}
-
-/*
- * The address free function.
- *
- * called with domain->lock held
- */
-static void dma_ops_free_addresses(struct dma_ops_domain *dom,
-				   unsigned long address,
-				   unsigned int pages)
-{
-	unsigned i = address >> APERTURE_RANGE_SHIFT;
-	struct aperture_range *range = dom->aperture[i];
-	unsigned long flags;
-
-	BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
-
-#ifdef CONFIG_IOMMU_STRESS
-	if (i < 4)
-		return;
-#endif
-
-	if (amd_iommu_unmap_flush) {
-		domain_flush_tlb(&dom->domain);
-		domain_flush_complete(&dom->domain);
-	}
-
-	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
-
-	spin_lock_irqsave(&range->bitmap_lock, flags);
-	if (address + pages > range->next_bit)
-		range->next_bit = address + pages;
-	bitmap_clear(range->bitmap, address, pages);
-	spin_unlock_irqrestore(&range->bitmap_lock, flags);
+	pages = __roundup_pow_of_two(pages);
+	address >>= PAGE_SHIFT;
 
+	free_iova_fast(&dma_dom->iovad, address, pages);
 }
 
 /****************************************************************************
@@ -2586,9 +2451,7 @@ static dma_addr_t __map_single(struct device *dev,
 	if (align)
 		align_mask = (1UL << get_order(size)) - 1;
 
-	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
-					  dma_mask);
-
+	address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
 	if (address == DMA_ERROR_CODE)
 		goto out;
 
@@ -2626,7 +2489,10 @@ out_unmap:
 		iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
 	}
 
-	dma_ops_free_addresses(dma_dom, address, pages);
+	domain_flush_tlb(&dma_dom->domain);
+	domain_flush_complete(&dma_dom->domain);
+
+	dma_ops_free_iova(dma_dom, address, pages);
 
 	return DMA_ERROR_CODE;
 }
@@ -2658,7 +2524,10 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 		start += PAGE_SIZE;
 	}
 
-	dma_ops_free_addresses(dma_dom, dma_addr, pages);
+	domain_flush_tlb(&dma_dom->domain);
+	domain_flush_complete(&dma_dom->domain);
+
+	dma_ops_free_iova(dma_dom, dma_addr, pages);
 }
 
 /*