author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2017-06-27 12:16:48 -0400
committer  Joerg Roedel <jroedel@suse.de>                      2017-06-28 06:24:40 -0400
commit     58c4a95f90839624b67f67acdb8a129f4383b569 (patch)
tree       44e773e9f149411b27fe80cbfecb87fc78feb572
parent     71bb620df634b22a08efd62a0f93c3f2aceaa8e2 (diff)
iommu/vt-d: Don't disable preemption while accessing deferred_flush()
get_cpu() disables preemption and returns the current CPU number. The
CPU number is used only once, to compute the address of the local CPU's
deferred_flush data.
We can instead use raw_cpu_ptr() and remain preemptible. The worst that
can happen is that flush_unmaps_timeout() is invoked multiple times:
once by task A after it sees HIGH_WATER_MARK and is then preempted and
migrated to another CPU, and again by task B, which sees HIGH_WATER_MARK
on the same CPU task A started on. It can also happen that ->size drops
from HIGH_WATER_MARK to 0 right after it is read, because another CPU
invoked flush_unmaps_timeout() for this CPU.
The access to flush_data is protected by a spinlock, so even if we are
preempted or migrated to another CPU, the data structure remains protected.
While at it, I marked deferred_flush static since I can't find a
reference to it outside of this file.
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: iommu@lists.linux-foundation.org
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
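
To illustrate the pattern change described above, here is a minimal,
self-contained sketch. The struct contents and the function names
queue_flush_old()/queue_flush_new() are simplified placeholders for
illustration only; the actual change is in add_unmap() in the diff below.

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/spinlock.h>

/* Simplified stand-in for the driver's per-CPU flush bookkeeping. */
struct flush_state {
	spinlock_t lock;	/* assumed initialized with spin_lock_init() at init time (omitted) */
	int size;
};

static DEFINE_PER_CPU(struct flush_state, flush_state);

/* Old pattern: preemption is disabled only to compute a per-CPU address. */
static void queue_flush_old(void)
{
	struct flush_state *fs;
	unsigned long flags;
	unsigned int cpuid;

	cpuid = get_cpu();			/* disables preemption */
	fs = per_cpu_ptr(&flush_state, cpuid);

	spin_lock_irqsave(&fs->lock, flags);
	fs->size++;				/* the real code queues a deferred IOTLB flush here */
	spin_unlock_irqrestore(&fs->lock, flags);

	put_cpu();				/* re-enables preemption */
}

/* New pattern: stay preemptible; the per-CPU spinlock serializes all access. */
static void queue_flush_new(void)
{
	struct flush_state *fs;
	unsigned long flags;

	/* The task may migrate after this, so fs can refer to another
	 * CPU's data; the spinlock still keeps the access safe. */
	fs = raw_cpu_ptr(&flush_state);

	spin_lock_irqsave(&fs->lock, flags);
	fs->size++;
	spin_unlock_irqrestore(&fs->lock, flags);
}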
-rw-r--r--   drivers/iommu/intel-iommu.c | 8
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index ee9c258d3ae0..0ca985b418ec 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -481,7 +481,7 @@ struct deferred_flush_data {
 	struct deferred_flush_table *tables;
 };
 
-DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
+static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
 
 /* bitmap for indexing intel_iommus */
 static int g_num_of_iommus;
@@ -3710,10 +3710,8 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
 	struct intel_iommu *iommu;
 	struct deferred_flush_entry *entry;
 	struct deferred_flush_data *flush_data;
-	unsigned int cpuid;
 
-	cpuid = get_cpu();
-	flush_data = per_cpu_ptr(&deferred_flush, cpuid);
+	flush_data = raw_cpu_ptr(&deferred_flush);
 
 	/* Flush all CPUs' entries to avoid deferring too much.  If
 	 * this becomes a bottleneck, can just flush us, and rely on
@@ -3746,8 +3744,6 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
 	}
 	flush_data->size++;
 	spin_unlock_irqrestore(&flush_data->lock, flags);
-
-	put_cpu();
 }
 
 static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)