author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>   2009-11-15 07:19:53 -0500
committer  Ingo Molnar <mingo@elte.hu>                       2009-11-17 01:53:21 -0500
commit     8fd524b355daef0945692227e726fb444cebcd4f (patch)
tree       d9c067e6e2693d46b70010a4ed2910205a59c92d /arch/x86/kernel/amd_iommu.c
parent     42109197eb7c01080eea6d9cd48ca23cbc3c566c (diff)
x86: Kill bad_dma_address variable
This kills the bad_dma_address variable, the old mechanism that let
IOMMU drivers make dma_mapping_error() work in an IOMMU-specific
way.

The bad_dma_address variable was introduced so that IOMMU drivers
could make dma_mapping_error() work in an IOMMU-specific way.
However, it can't handle systems that use both swiotlb and a
hardware IOMMU, so dma_map_ops->mapping_error was introduced to
solve that case.

Intel VT-d, GART, and swiotlb already use
dma_map_ops->mapping_error. Calgary, AMD IOMMU, and nommu use zero
as the error DMA address. This adds DMA_ERROR_CODE and converts
them to use it (as SPARC and POWER do).
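
[Editor's note] For context, this split leaves x86's generic dma_mapping_error()
with two paths: the per-implementation hook for drivers that register one, and a
comparison against the shared constant for those that don't. A minimal sketch of
that dispatch, modeled on the 2.6.32-era arch/x86/include/asm/dma-mapping.h and
not quoted verbatim from the tree:

    /* Sketch: DMA_ERROR_CODE is zero on x86, matching the drivers above
     * that already treat a zero DMA address as failure. */
    #define DMA_ERROR_CODE  0

    static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
    {
            struct dma_map_ops *ops = get_dma_ops(dev);

            /* Intel VT-d, GART, swiotlb: IOMMU-specific error check */
            if (ops->mapping_error)
                    return ops->mapping_error(dev, dma_addr);

            /* Calgary, AMD IOMMU, nommu: a zero address signals failure */
            return (dma_addr == DMA_ERROR_CODE);
    }
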
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Acked-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Cc: muli@il.ibm.com
Cc: joerg.roedel@amd.com
LKML-Reference: <1258287594-8777-3-git-send-email-fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 21 ++++++++++-----------
1 file changed, 10 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 66237fde758f..093bd526c949 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -928,7 +928,7 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
 	}
 
 	if (unlikely(address == -1))
-		address = bad_dma_address;
+		address = DMA_ERROR_CODE;
 
 	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
 
@@ -1544,7 +1544,7 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
 
 	pte = dma_ops_get_pte(dom, address);
 	if (!pte)
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
 
 	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
 
@@ -1625,7 +1625,7 @@ static dma_addr_t __map_single(struct device *dev,
 retry:
 	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
 					  dma_mask);
-	if (unlikely(address == bad_dma_address)) {
+	if (unlikely(address == DMA_ERROR_CODE)) {
 		/*
 		 * setting next_address here will let the address
 		 * allocator only scan the new allocated range in the
@@ -1646,7 +1646,7 @@ retry:
 	start = address;
 	for (i = 0; i < pages; ++i) {
 		ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
-		if (ret == bad_dma_address)
+		if (ret == DMA_ERROR_CODE)
 			goto out_unmap;
 
 		paddr += PAGE_SIZE;
@@ -1674,7 +1674,7 @@ out_unmap:
 
 	dma_ops_free_addresses(dma_dom, address, pages);
 
-	return bad_dma_address;
+	return DMA_ERROR_CODE;
 }
 
 /*
@@ -1690,7 +1690,7 @@ static void __unmap_single(struct amd_iommu *iommu,
 	dma_addr_t i, start;
 	unsigned int pages;
 
-	if ((dma_addr == bad_dma_address) ||
+	if ((dma_addr == DMA_ERROR_CODE) ||
 	    (dma_addr + size > dma_dom->aperture_size))
 		return;
 
@@ -1732,7 +1732,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 	INC_STATS_COUNTER(cnt_map_single);
 
 	if (!check_device(dev))
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
 
 	dma_mask = *dev->dma_mask;
 
@@ -1743,12 +1743,12 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 		return (dma_addr_t)paddr;
 
 	if (!dma_ops_domain(domain))
-		return bad_dma_address;
+		return DMA_ERROR_CODE;
 
 	spin_lock_irqsave(&domain->lock, flags);
 	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
 			    dma_mask);
-	if (addr == bad_dma_address)
+	if (addr == DMA_ERROR_CODE)
 		goto out;
 
 	iommu_completion_wait(iommu);
@@ -1957,7 +1957,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
 				 size, DMA_BIDIRECTIONAL, true, dma_mask);
 
-	if (*dma_addr == bad_dma_address) {
+	if (*dma_addr == DMA_ERROR_CODE) {
 		spin_unlock_irqrestore(&domain->lock, flags);
 		goto out_free;
 	}
@@ -2110,7 +2110,6 @@ int __init amd_iommu_init_dma_ops(void)
 	prealloc_protection_domains();
 
 	iommu_detected = 1;
-	bad_dma_address = 0;
 	swiotlb = 0;
 #ifdef CONFIG_GART_IOMMU
 	gart_iommu_aperture_disabled = 1;
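
[Editor's note] Driver code never compares against DMA_ERROR_CODE (or, before this
patch, bad_dma_address) directly; it goes through dma_mapping_error(), which picks
the right check for whichever implementation performed the mapping. A hypothetical
caller, where dev, buf, and len are illustrative names, not from this patch:

    /* Hypothetical driver fragment: map a buffer and verify the mapping. */
    dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, handle)) {
            /* Same check works for swiotlb, AMD IOMMU, and nommu mappings */
            return -ENOMEM;
    }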