diff options
author | Joerg Roedel <joerg.roedel@amd.com> | 2009-11-20 10:00:05 -0500 |
---|---|---|
committer | Joerg Roedel <joerg.roedel@amd.com> | 2009-11-27 05:45:50 -0500 |
commit | 0518a3a4585cb3eeeaf14ca57131f11d252130c6 (patch) | |
tree | 93efaf790ffbb6c51086573273a63ccc39f1532c /arch | |
parent | c459611424d8b8396060eb766e23bd0c70c993bc (diff) |
x86/amd-iommu: Add function to complete a tlb flush
This patch adds a function to the AMD IOMMU driver which
completes all queued commands on all IOMMUs to which a specific
domain has devices attached. This is required in a later
patch when per-domain flushing is implemented.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kernel/amd_iommu.c | 28 |
1 files changed, 22 insertions, 6 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c index 8c38f0085403..8fa5cc3e02d2 100644 --- a/arch/x86/kernel/amd_iommu.c +++ b/arch/x86/kernel/amd_iommu.c | |||
@@ -376,6 +376,22 @@ out: | |||
376 | return 0; | 376 | return 0; |
377 | } | 377 | } |
378 | 378 | ||
379 | static void iommu_flush_complete(struct protection_domain *domain) | ||
380 | { | ||
381 | int i; | ||
382 | |||
383 | for (i = 0; i < amd_iommus_present; ++i) { | ||
384 | if (!domain->dev_iommu[i]) | ||
385 | continue; | ||
386 | |||
387 | /* | ||
388 | * Devices of this domain are behind this IOMMU | ||
389 | * We need to wait for completion of all commands. | ||
390 | */ | ||
391 | iommu_completion_wait(amd_iommus[i]); | ||
392 | } | ||
393 | } | ||
394 | |||
379 | /* | 395 | /* |
380 | * Command send function for invalidating a device table entry | 396 | * Command send function for invalidating a device table entry |
381 | */ | 397 | */ |
@@ -1758,7 +1774,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page, | |||
1758 | if (addr == DMA_ERROR_CODE) | 1774 | if (addr == DMA_ERROR_CODE) |
1759 | goto out; | 1775 | goto out; |
1760 | 1776 | ||
1761 | iommu_completion_wait(iommu); | 1777 | iommu_flush_complete(domain); |
1762 | 1778 | ||
1763 | out: | 1779 | out: |
1764 | spin_unlock_irqrestore(&domain->lock, flags); | 1780 | spin_unlock_irqrestore(&domain->lock, flags); |
@@ -1791,7 +1807,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, | |||
1791 | 1807 | ||
1792 | __unmap_single(iommu, domain->priv, dma_addr, size, dir); | 1808 | __unmap_single(iommu, domain->priv, dma_addr, size, dir); |
1793 | 1809 | ||
1794 | iommu_completion_wait(iommu); | 1810 | iommu_flush_complete(domain); |
1795 | 1811 | ||
1796 | spin_unlock_irqrestore(&domain->lock, flags); | 1812 | spin_unlock_irqrestore(&domain->lock, flags); |
1797 | } | 1813 | } |
@@ -1863,7 +1879,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, | |||
1863 | goto unmap; | 1879 | goto unmap; |
1864 | } | 1880 | } |
1865 | 1881 | ||
1866 | iommu_completion_wait(iommu); | 1882 | iommu_flush_complete(domain); |
1867 | 1883 | ||
1868 | out: | 1884 | out: |
1869 | spin_unlock_irqrestore(&domain->lock, flags); | 1885 | spin_unlock_irqrestore(&domain->lock, flags); |
@@ -1914,7 +1930,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
1914 | s->dma_address = s->dma_length = 0; | 1930 | s->dma_address = s->dma_length = 0; |
1915 | } | 1931 | } |
1916 | 1932 | ||
1917 | iommu_completion_wait(iommu); | 1933 | iommu_flush_complete(domain); |
1918 | 1934 | ||
1919 | spin_unlock_irqrestore(&domain->lock, flags); | 1935 | spin_unlock_irqrestore(&domain->lock, flags); |
1920 | } | 1936 | } |
@@ -1969,7 +1985,7 @@ static void *alloc_coherent(struct device *dev, size_t size, | |||
1969 | goto out_free; | 1985 | goto out_free; |
1970 | } | 1986 | } |
1971 | 1987 | ||
1972 | iommu_completion_wait(iommu); | 1988 | iommu_flush_complete(domain); |
1973 | 1989 | ||
1974 | spin_unlock_irqrestore(&domain->lock, flags); | 1990 | spin_unlock_irqrestore(&domain->lock, flags); |
1975 | 1991 | ||
@@ -2010,7 +2026,7 @@ static void free_coherent(struct device *dev, size_t size, | |||
2010 | 2026 | ||
2011 | __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); | 2027 | __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); |
2012 | 2028 | ||
2013 | iommu_completion_wait(iommu); | 2029 | iommu_flush_complete(domain); |
2014 | 2030 | ||
2015 | spin_unlock_irqrestore(&domain->lock, flags); | 2031 | spin_unlock_irqrestore(&domain->lock, flags); |
2016 | 2032 | ||