aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJoerg Roedel <jroedel@suse.de>2016-07-06 07:56:36 -0400
committerJoerg Roedel <jroedel@suse.de>2016-07-13 06:48:35 -0400
commitbb279475db4d0bb07e4dbc99e060362b9f3b5093 (patch)
tree90200e96f31e77d4c4cb6655804bad33f6e6b8cd
parentb1516a14657acf81a587e9a6e733a881625eee53 (diff)
iommu/amd: Implement timeout to flush unmap queues
In case the queue doesn't fill up, we flush the TLB at least 10ms after the unmap happened to make sure that the TLB is cleaned up. Signed-off-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--drivers/iommu/amd_iommu.c28
1 file changed, 28 insertions, 0 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a8e4c5adade7..c0b2f4fc6bfc 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -105,6 +105,9 @@ struct flush_queue {
105 105
106DEFINE_PER_CPU(struct flush_queue, flush_queue); 106DEFINE_PER_CPU(struct flush_queue, flush_queue);
107 107
108static atomic_t queue_timer_on;
109static struct timer_list queue_timer;
110
108/* 111/*
109 * Domain for untranslated devices - only allocated 112 * Domain for untranslated devices - only allocated
110 * if iommu=pt passed on kernel cmd line. 113 * if iommu=pt passed on kernel cmd line.
@@ -2151,6 +2154,24 @@ static void __queue_flush(struct flush_queue *queue)
2151 queue->next = 0; 2154 queue->next = 0;
2152} 2155}
2153 2156
2157void queue_flush_timeout(unsigned long unsused)
2158{
2159 int cpu;
2160
2161 atomic_set(&queue_timer_on, 0);
2162
2163 for_each_possible_cpu(cpu) {
2164 struct flush_queue *queue;
2165 unsigned long flags;
2166
2167 queue = per_cpu_ptr(&flush_queue, cpu);
2168 spin_lock_irqsave(&queue->lock, flags);
2169 if (queue->next > 0)
2170 __queue_flush(queue);
2171 spin_unlock_irqrestore(&queue->lock, flags);
2172 }
2173}
2174
2154static void queue_add(struct dma_ops_domain *dma_dom, 2175static void queue_add(struct dma_ops_domain *dma_dom,
2155 unsigned long address, unsigned long pages) 2176 unsigned long address, unsigned long pages)
2156{ 2177{
@@ -2176,6 +2197,10 @@ static void queue_add(struct dma_ops_domain *dma_dom,
2176 entry->dma_dom = dma_dom; 2197 entry->dma_dom = dma_dom;
2177 2198
2178 spin_unlock_irqrestore(&queue->lock, flags); 2199 spin_unlock_irqrestore(&queue->lock, flags);
2200
2201 if (atomic_cmpxchg(&queue_timer_on, 0, 1) == 0)
2202 mod_timer(&queue_timer, jiffies + msecs_to_jiffies(10));
2203
2179 put_cpu_ptr(&flush_queue); 2204 put_cpu_ptr(&flush_queue);
2180} 2205}
2181 2206
@@ -2634,6 +2659,9 @@ out_put_iova:
2634 2659
2635int __init amd_iommu_init_dma_ops(void) 2660int __init amd_iommu_init_dma_ops(void)
2636{ 2661{
2662 setup_timer(&queue_timer, queue_flush_timeout, 0);
2663 atomic_set(&queue_timer_on, 0);
2664
2637 swiotlb = iommu_pass_through ? 1 : 0; 2665 swiotlb = iommu_pass_through ? 1 : 0;
2638 iommu_detected = 1; 2666 iommu_detected = 1;
2639 2667