| author | Joerg Roedel <joerg.roedel@amd.com> | 2009-11-23 13:33:56 -0500 |
|---|---|---|
| committer | Joerg Roedel <joerg.roedel@amd.com> | 2009-11-27 08:17:08 -0500 |
| commit | cd8c82e875c27ee0d8b59fb76bc12aa9db6a70c2 (patch) | |
| tree | 653ff6e7506822fb8d61fe6259abb3ba01e7b139 /arch/x86/kernel/amd_iommu.c | |
| parent | 576175c2503ae9b0f930ee9a6a0abaf7ef8956ad (diff) | |
x86/amd-iommu: Remove iommu parameter from __(un)map_single
With the prior changes this parameter is no longer
required. This patch removes it from the function and all
callers.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
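For quick reference, a minimal before/after sketch of the interface change, reproduced from the diff below (callers now pass the dma_ops_domain they already hold as domain->priv, instead of also supplying the per-device struct amd_iommu):

/* before: both the IOMMU and the DMA ops domain were passed */
static void __unmap_single(struct amd_iommu *iommu,
			   struct dma_ops_domain *dma_dom,
			   dma_addr_t dma_addr,
			   size_t size,
			   int dir);

/* after: the DMA ops domain alone is sufficient */
static void __unmap_single(struct dma_ops_domain *dma_dom,
			   dma_addr_t dma_addr,
			   size_t size,
			   int dir);

/* call sites simplify accordingly, e.g. */
__unmap_single(domain->priv, dma_addr, size, dir);

The same simplification is applied to __map_single and its callers.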
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r-- | arch/x86/kernel/amd_iommu.c | 18
1 file changed, 8 insertions, 10 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 687f617b95d7..c04dcb7f40b2 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1653,7 +1653,6 @@ static void dma_ops_domain_unmap(struct dma_ops_domain *dom,
  * Must be called with the domain lock held.
  */
 static dma_addr_t __map_single(struct device *dev,
-			       struct amd_iommu *iommu,
 			       struct dma_ops_domain *dma_dom,
 			       phys_addr_t paddr,
 			       size_t size,
@@ -1737,8 +1736,7 @@ out_unmap:
  * Does the reverse of the __map_single function. Must be called with
  * the domain lock held too
  */
-static void __unmap_single(struct amd_iommu *iommu,
-			   struct dma_ops_domain *dma_dom,
+static void __unmap_single(struct dma_ops_domain *dma_dom,
 			   dma_addr_t dma_addr,
 			   size_t size,
 			   int dir)
@@ -1797,7 +1795,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 		return DMA_ERROR_CODE;
 
 	spin_lock_irqsave(&domain->lock, flags);
-	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
+	addr = __map_single(dev, domain->priv, paddr, size, dir, false,
 			    dma_mask);
 	if (addr == DMA_ERROR_CODE)
 		goto out;
@@ -1832,7 +1830,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	spin_lock_irqsave(&domain->lock, flags);
 
-	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
+	__unmap_single(domain->priv, dma_addr, size, dir);
 
 	iommu_flush_complete(domain);
 
@@ -1890,7 +1888,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 	for_each_sg(sglist, s, nelems, i) {
 		paddr = sg_phys(s);
 
-		s->dma_address = __map_single(dev, iommu, domain->priv,
+		s->dma_address = __map_single(dev, domain->priv,
 					      paddr, s->length, dir, false,
 					      dma_mask);
 
@@ -1910,7 +1908,7 @@ out:
 unmap:
 	for_each_sg(sglist, s, mapped_elems, i) {
 		if (s->dma_address)
-			__unmap_single(iommu, domain->priv, s->dma_address,
+			__unmap_single(domain->priv, s->dma_address,
 				       s->dma_length, dir);
 		s->dma_address = s->dma_length = 0;
 	}
@@ -1946,7 +1944,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 	spin_lock_irqsave(&domain->lock, flags);
 
 	for_each_sg(sglist, s, nelems, i) {
-		__unmap_single(iommu, domain->priv, s->dma_address,
+		__unmap_single(domain->priv, s->dma_address,
 			       s->dma_length, dir);
 		s->dma_address = s->dma_length = 0;
 	}
@@ -1996,7 +1994,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 
 	spin_lock_irqsave(&domain->lock, flags);
 
-	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
+	*dma_addr = __map_single(dev, domain->priv, paddr,
 				 size, DMA_BIDIRECTIONAL, true, dma_mask);
 
 	if (*dma_addr == DMA_ERROR_CODE) {
@@ -2038,7 +2036,7 @@ static void free_coherent(struct device *dev, size_t size,
 
 	spin_lock_irqsave(&domain->lock, flags);
 
-	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
+	__unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
 	iommu_flush_complete(domain);
 