author    Joerg Roedel <jroedel@suse.de>  2017-06-02 09:44:57 -0400
committer Joerg Roedel <jroedel@suse.de>  2017-06-08 08:39:16 -0400
commit    e241f8e76c152e000d481fc8334d41d22c013fe8 (patch)
tree      1be05851bc3efe2e7b7171b90b623627fc4c7f78
parent    fd62190a67d6bdf9b93dea056adfcd7fd29b0f92 (diff)
iommu/amd: Add locking to per-domain flush-queue
With locking we can safely access the flush-queues of other CPUs.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
 drivers/iommu/amd_iommu.c | 11 +++++++++++
 1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 9fafc3026865..9a06acc8cc9d 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -146,6 +146,7 @@ struct flush_queue_entry {
 struct flush_queue {
 	struct flush_queue_entry *entries;
 	unsigned head, tail;
+	spinlock_t lock;
 };
 
 /*
@@ -1801,6 +1802,8 @@ static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom)
 			dma_ops_domain_free_flush_queue(dom);
 			return -ENOMEM;
 		}
+
+		spin_lock_init(&queue->lock);
 	}
 
 	return 0;
@@ -1808,6 +1811,8 @@ static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom)
 
 static inline bool queue_ring_full(struct flush_queue *queue)
 {
+	assert_spin_locked(&queue->lock);
+
 	return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head);
 }
 
@@ -1819,6 +1824,8 @@ static void queue_release(struct dma_ops_domain *dom,
 {
 	unsigned i;
 
+	assert_spin_locked(&queue->lock);
+
 	queue_ring_for_each(i, queue)
 		free_iova_fast(&dom->iovad,
 			       queue->entries[i].iova_pfn,
@@ -1831,6 +1838,7 @@ static inline unsigned queue_ring_add(struct flush_queue *queue)
 {
 	unsigned idx = queue->tail;
 
+	assert_spin_locked(&queue->lock);
 	queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE;
 
 	return idx;
@@ -1840,12 +1848,14 @@ static void queue_add(struct dma_ops_domain *dom,
 		      unsigned long address, unsigned long pages)
 {
 	struct flush_queue *queue;
+	unsigned long flags;
 	int idx;
 
 	pages     = __roundup_pow_of_two(pages);
 	address >>= PAGE_SHIFT;
 
 	queue = get_cpu_ptr(dom->flush_queue);
+	spin_lock_irqsave(&queue->lock, flags);
 
 	if (queue_ring_full(queue)) {
 		domain_flush_tlb(&dom->domain);
@@ -1858,6 +1868,7 @@ static void queue_add(struct dma_ops_domain *dom,
 	queue->entries[idx].iova_pfn = address;
 	queue->entries[idx].pages    = pages;
 
+	spin_unlock_irqrestore(&queue->lock, flags);
 	put_cpu_ptr(dom->flush_queue);
 }
 
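For context, here is a minimal sketch of the kind of cross-CPU access the new lock enables. It is not part of this patch: the helper name queue_flush_all() is hypothetical, while domain_flush_tlb(), domain_flush_complete() and queue_release() are existing functions in amd_iommu.c. With each queue protected by its own spinlock, a path running on one CPU can walk and drain every CPU's flush queue:

/*
 * Hypothetical sketch, not part of this patch: drain the flush
 * queues of all CPUs from a single context. This is safe only
 * because every per-CPU queue now carries its own spinlock.
 */
static void queue_flush_all(struct dma_ops_domain *dom)
{
	int cpu;

	/* One domain-wide TLB flush covers all queued ranges */
	domain_flush_tlb(&dom->domain);
	domain_flush_complete(&dom->domain);

	for_each_possible_cpu(cpu) {
		struct flush_queue *queue;
		unsigned long flags;

		queue = per_cpu_ptr(dom->flush_queue, cpu);
		spin_lock_irqsave(&queue->lock, flags);
		queue_release(dom, queue);	/* asserts the lock is held */
		spin_unlock_irqrestore(&queue->lock, flags);
	}
}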