aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--  drivers/iommu/amd_iommu.c  | 60
1 file changed, 56 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2418fcc28fbe..9fafc3026865 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1806,6 +1806,61 @@ static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom)
1806 return 0; 1806 return 0;
1807} 1807}
1808 1808
1809static inline bool queue_ring_full(struct flush_queue *queue)
1810{
1811 return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head);
1812}
1813
/*
 * queue_ring_for_each - iterate @i over every occupied slot of ring @q,
 * from head up to (but not including) tail, wrapping at FLUSH_QUEUE_SIZE.
 *
 * Fix: parenthesize the @i argument everywhere it is expanded — the
 * original left it bare, which breaks for any non-trivial lvalue
 * expression passed as the iterator (standard macro-hygiene rule).
 */
#define queue_ring_for_each(i, q) \
	for ((i) = (q)->head; (i) != (q)->tail; (i) = ((i) + 1) % FLUSH_QUEUE_SIZE)
1816
1817static void queue_release(struct dma_ops_domain *dom,
1818 struct flush_queue *queue)
1819{
1820 unsigned i;
1821
1822 queue_ring_for_each(i, queue)
1823 free_iova_fast(&dom->iovad,
1824 queue->entries[i].iova_pfn,
1825 queue->entries[i].pages);
1826
1827 queue->head = queue->tail = 0;
1828}
1829
1830static inline unsigned queue_ring_add(struct flush_queue *queue)
1831{
1832 unsigned idx = queue->tail;
1833
1834 queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;
1835
1836 return idx;
1837}
1838
1839static void queue_add(struct dma_ops_domain *dom,
1840 unsigned long address, unsigned long pages)
1841{
1842 struct flush_queue *queue;
1843 int idx;
1844
1845 pages = __roundup_pow_of_two(pages);
1846 address >>= PAGE_SHIFT;
1847
1848 queue = get_cpu_ptr(dom->flush_queue);
1849
1850 if (queue_ring_full(queue)) {
1851 domain_flush_tlb(&dom->domain);
1852 domain_flush_complete(&dom->domain);
1853 queue_release(dom, queue);
1854 }
1855
1856 idx = queue_ring_add(queue);
1857
1858 queue->entries[idx].iova_pfn = address;
1859 queue->entries[idx].pages = pages;
1860
1861 put_cpu_ptr(dom->flush_queue);
1862}
1863
1809/* 1864/*
1810 * Free a domain, only used if something went wrong in the 1865 * Free a domain, only used if something went wrong in the
1811 * allocation path and we need to free an already allocated page table 1866 * allocation path and we need to free an already allocated page table
@@ -2454,10 +2509,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
2454 domain_flush_tlb(&dma_dom->domain); 2509 domain_flush_tlb(&dma_dom->domain);
2455 domain_flush_complete(&dma_dom->domain); 2510 domain_flush_complete(&dma_dom->domain);
2456 } else { 2511 } else {
2457 /* Keep the if() around, we need it later again */ 2512 queue_add(dma_dom, dma_addr, pages);
2458 dma_ops_free_iova(dma_dom, dma_addr, pages);
2459 domain_flush_tlb(&dma_dom->domain);
2460 domain_flush_complete(&dma_dom->domain);
2461 } 2513 }
2462} 2514}
2463 2515