Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 21
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 66237fde758f..093bd526c949 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -928,7 +928,7 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
 	}
 
 	if (unlikely(address == -1))
-		address = bad_dma_address;
+		address = DMA_ERROR_CODE;
 
 	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
 
@@ -1544,7 +1544,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
 
 	pte = dma_ops_get_pte(dom, address);
 	if (!pte)
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
 
 	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
 
@@ -1625,7 +1625,7 @@ static dma_addr_t __map_single(struct device *dev,
 retry:
 	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
 					  dma_mask);
-	if (unlikely(address == bad_dma_address)) {
+	if (unlikely(address == DMA_ERROR_CODE)) {
 		/*
 		 * setting next_address here will let the address
 		 * allocator only scan the new allocated range in the
@@ -1646,7 +1646,7 @@ retry:
 	start = address;
 	for (i = 0; i < pages; ++i) {
 		ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
-		if (ret == bad_dma_address)
+		if (ret == DMA_ERROR_CODE)
 			goto out_unmap;
 
 		paddr += PAGE_SIZE;
@@ -1674,7 +1674,7 @@ out_unmap:
 
 	dma_ops_free_addresses(dma_dom, address, pages);
 
-	return bad_dma_address;
+	return DMA_ERROR_CODE;
 }
 
 /*
@@ -1690,7 +1690,7 @@ static void __unmap_single(struct amd_iommu *iommu,
 	dma_addr_t i, start;
 	unsigned int pages;
 
-	if ((dma_addr == bad_dma_address) ||
+	if ((dma_addr == DMA_ERROR_CODE) ||
 	    (dma_addr + size > dma_dom->aperture_size))
 		return;
 
@@ -1732,7 +1732,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 	INC_STATS_COUNTER(cnt_map_single);
 
 	if (!check_device(dev))
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
 
 	dma_mask = *dev->dma_mask;
 
@@ -1743,12 +1743,12 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 		return (dma_addr_t)paddr;
 
 	if (!dma_ops_domain(domain))
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
 
 	spin_lock_irqsave(&domain->lock, flags);
 	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
 			    dma_mask);
-	if (addr == bad_dma_address)
+	if (addr == DMA_ERROR_CODE)
 		goto out;
 
 	iommu_completion_wait(iommu);
@@ -1957,7 +1957,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
 				 size, DMA_BIDIRECTIONAL, true, dma_mask);
 
-	if (*dma_addr == bad_dma_address) {
+	if (*dma_addr == DMA_ERROR_CODE) {
 		spin_unlock_irqrestore(&domain->lock, flags);
 		goto out_free;
 	}
@@ -2110,7 +2110,6 @@ int __init amd_iommu_init_dma_ops(void)
 	prealloc_protection_domains();
 
 	iommu_detected = 1;
-	bad_dma_address = 0;
 	swiotlb = 0;
 #ifdef CONFIG_GART_IOMMU
 	gart_iommu_aperture_disabled = 1;
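Note (illustrative, not part of the diff above): whichever error cookie the architecture uses internally, bad_dma_address before this change or DMA_ERROR_CODE after it, drivers are expected to detect a failed mapping through the generic dma_mapping_error() helper rather than by comparing against the raw value. A minimal sketch of that calling pattern, using a hypothetical example_map_buffer() wrapper:

#include <linux/dma-mapping.h>

/* Hypothetical helper: map one page for device DMA and check the result. */
static int example_map_buffer(struct device *dev, struct page *page,
			      size_t size, dma_addr_t *handle)
{
	*handle = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);

	/*
	 * dma_mapping_error() hides the per-architecture error value
	 * (DMA_ERROR_CODE on x86 after this change), so callers never
	 * test the magic constant themselves.
	 */
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	return 0;
}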