diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-10-13 13:09:59 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-10-13 13:09:59 -0400 |
commit | 75542253127d4e4003a5542189c53ff85e4b27b2 (patch) | |
tree | 40cbda3a47bd96ca8d96a015bd6c73037039edb7 /drivers/iommu | |
parent | 06d1ee32a4d25356a710b49d5e95dbdd68bdf505 (diff) | |
parent | 5adad9915472e180712030d730cdc476c6f8a60b (diff) |
Merge tag 'iommu-fixes-v4.3-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU fixes from Joerg Roedel:
"A few fixes piled up:
- Fix for a suspend/resume issue where PCI probing code overwrote
dev->irq for the MSI irq of the AMD IOMMU.
- Fix for a kernel crash when a 32 bit PCI device was assigned to a
KVM guest.
- Fix for a possible memory leak in the VT-d driver.
- A couple of fixes for the ARM-SMMU driver"
* tag 'iommu-fixes-v4.3-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
iommu/amd: Fix NULL pointer deref on device detach
iommu/amd: Prevent binding other PCI drivers to IOMMU PCI devices
iommu/vt-d: Fix memory leak in dmar_insert_one_dev_info()
iommu/arm-smmu: Use correct address mask for CMD_TLBI_S2_IPA
iommu/arm-smmu: Ensure IAS is set correctly for AArch32-capable SMMUs
iommu/io-pgtable-arm: Don't use dma_to_phys()
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- | drivers/iommu/Kconfig | 3 | ||||
-rw-r--r-- | drivers/iommu/amd_iommu.c | 9 | ||||
-rw-r--r-- | drivers/iommu/amd_iommu_init.c | 3 | ||||
-rw-r--r-- | drivers/iommu/arm-smmu-v3.c | 21 | ||||
-rw-r--r-- | drivers/iommu/intel-iommu.c | 1 | ||||
-rw-r--r-- | drivers/iommu/io-pgtable-arm.c | 24 |
6 files changed, 42 insertions, 19 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index d9da766719c8..cbe6a890a93a 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig | |||
@@ -23,8 +23,7 @@ config IOMMU_IO_PGTABLE | |||
23 | config IOMMU_IO_PGTABLE_LPAE | 23 | config IOMMU_IO_PGTABLE_LPAE |
24 | bool "ARMv7/v8 Long Descriptor Format" | 24 | bool "ARMv7/v8 Long Descriptor Format" |
25 | select IOMMU_IO_PGTABLE | 25 | select IOMMU_IO_PGTABLE |
26 | # SWIOTLB guarantees a dma_to_phys() implementation | 26 | depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST) |
27 | depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB) | ||
28 | help | 27 | help |
29 | Enable support for the ARM long descriptor pagetable format. | 28 | Enable support for the ARM long descriptor pagetable format. |
30 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page | 29 | This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index f82060e778a2..08d2775887f7 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -2006,6 +2006,15 @@ static void do_detach(struct iommu_dev_data *dev_data) | |||
2006 | { | 2006 | { |
2007 | struct amd_iommu *iommu; | 2007 | struct amd_iommu *iommu; |
2008 | 2008 | ||
2009 | /* | ||
2010 | * First check if the device is still attached. It might already | ||
2011 | * be detached from its domain because the generic | ||
2012 | * iommu_detach_group code detached it and we try again here in | ||
2013 | * our alias handling. | ||
2014 | */ | ||
2015 | if (!dev_data->domain) | ||
2016 | return; | ||
2017 | |||
2009 | iommu = amd_iommu_rlookup_table[dev_data->devid]; | 2018 | iommu = amd_iommu_rlookup_table[dev_data->devid]; |
2010 | 2019 | ||
2011 | /* decrease reference counters */ | 2020 | /* decrease reference counters */ |
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 5ef347a13cb5..1b066e7d144d 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
@@ -1256,6 +1256,9 @@ static int iommu_init_pci(struct amd_iommu *iommu) | |||
1256 | if (!iommu->dev) | 1256 | if (!iommu->dev) |
1257 | return -ENODEV; | 1257 | return -ENODEV; |
1258 | 1258 | ||
1259 | /* Prevent binding other PCI device drivers to IOMMU devices */ | ||
1260 | iommu->dev->match_driver = false; | ||
1261 | |||
1259 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, | 1262 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, |
1260 | &iommu->cap); | 1263 | &iommu->cap); |
1261 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, | 1264 | pci_read_config_dword(iommu->dev, cap_ptr + MMIO_RANGE_OFFSET, |
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index dafaf59dc3b8..286e890e7d64 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #define IDR0_TTF_SHIFT 2 | 56 | #define IDR0_TTF_SHIFT 2 |
57 | #define IDR0_TTF_MASK 0x3 | 57 | #define IDR0_TTF_MASK 0x3 |
58 | #define IDR0_TTF_AARCH64 (2 << IDR0_TTF_SHIFT) | 58 | #define IDR0_TTF_AARCH64 (2 << IDR0_TTF_SHIFT) |
59 | #define IDR0_TTF_AARCH32_64 (3 << IDR0_TTF_SHIFT) | ||
59 | #define IDR0_S1P (1 << 1) | 60 | #define IDR0_S1P (1 << 1) |
60 | #define IDR0_S2P (1 << 0) | 61 | #define IDR0_S2P (1 << 0) |
61 | 62 | ||
@@ -342,7 +343,8 @@ | |||
342 | #define CMDQ_TLBI_0_VMID_SHIFT 32 | 343 | #define CMDQ_TLBI_0_VMID_SHIFT 32 |
343 | #define CMDQ_TLBI_0_ASID_SHIFT 48 | 344 | #define CMDQ_TLBI_0_ASID_SHIFT 48 |
344 | #define CMDQ_TLBI_1_LEAF (1UL << 0) | 345 | #define CMDQ_TLBI_1_LEAF (1UL << 0) |
345 | #define CMDQ_TLBI_1_ADDR_MASK ~0xfffUL | 346 | #define CMDQ_TLBI_1_VA_MASK ~0xfffUL |
347 | #define CMDQ_TLBI_1_IPA_MASK 0xfffffffff000UL | ||
346 | 348 | ||
347 | #define CMDQ_PRI_0_SSID_SHIFT 12 | 349 | #define CMDQ_PRI_0_SSID_SHIFT 12 |
348 | #define CMDQ_PRI_0_SSID_MASK 0xfffffUL | 350 | #define CMDQ_PRI_0_SSID_MASK 0xfffffUL |
@@ -770,11 +772,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) | |||
770 | break; | 772 | break; |
771 | case CMDQ_OP_TLBI_NH_VA: | 773 | case CMDQ_OP_TLBI_NH_VA: |
772 | cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; | 774 | cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; |
773 | /* Fallthrough */ | 775 | cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0; |
776 | cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK; | ||
777 | break; | ||
774 | case CMDQ_OP_TLBI_S2_IPA: | 778 | case CMDQ_OP_TLBI_S2_IPA: |
775 | cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT; | 779 | cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT; |
776 | cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0; | 780 | cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0; |
777 | cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_ADDR_MASK; | 781 | cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK; |
778 | break; | 782 | break; |
779 | case CMDQ_OP_TLBI_NH_ASID: | 783 | case CMDQ_OP_TLBI_NH_ASID: |
780 | cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; | 784 | cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT; |
@@ -2460,7 +2464,13 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu) | |||
2460 | } | 2464 | } |
2461 | 2465 | ||
2462 | /* We only support the AArch64 table format at present */ | 2466 | /* We only support the AArch64 table format at present */ |
2463 | if ((reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) < IDR0_TTF_AARCH64) { | 2467 | switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) { |
2468 | case IDR0_TTF_AARCH32_64: | ||
2469 | smmu->ias = 40; | ||
2470 | /* Fallthrough */ | ||
2471 | case IDR0_TTF_AARCH64: | ||
2472 | break; | ||
2473 | default: | ||
2464 | dev_err(smmu->dev, "AArch64 table format not supported!\n"); | 2474 | dev_err(smmu->dev, "AArch64 table format not supported!\n"); |
2465 | return -ENXIO; | 2475 | return -ENXIO; |
2466 | } | 2476 | } |
@@ -2541,8 +2551,7 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu) | |||
2541 | dev_warn(smmu->dev, | 2551 | dev_warn(smmu->dev, |
2542 | "failed to set DMA mask for table walker\n"); | 2552 | "failed to set DMA mask for table walker\n"); |
2543 | 2553 | ||
2544 | if (!smmu->ias) | 2554 | smmu->ias = max(smmu->ias, smmu->oas); |
2545 | smmu->ias = smmu->oas; | ||
2546 | 2555 | ||
2547 | dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n", | 2556 | dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n", |
2548 | smmu->ias, smmu->oas, smmu->features); | 2557 | smmu->ias, smmu->oas, smmu->features); |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 041bc1810a86..35365f046923 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -2301,6 +2301,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, | |||
2301 | 2301 | ||
2302 | if (ret) { | 2302 | if (ret) { |
2303 | spin_unlock_irqrestore(&device_domain_lock, flags); | 2303 | spin_unlock_irqrestore(&device_domain_lock, flags); |
2304 | free_devinfo_mem(info); | ||
2304 | return NULL; | 2305 | return NULL; |
2305 | } | 2306 | } |
2306 | 2307 | ||
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 73c07482f487..7df97777662d 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c | |||
@@ -202,9 +202,9 @@ typedef u64 arm_lpae_iopte; | |||
202 | 202 | ||
203 | static bool selftest_running = false; | 203 | static bool selftest_running = false; |
204 | 204 | ||
205 | static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages) | 205 | static dma_addr_t __arm_lpae_dma_addr(void *pages) |
206 | { | 206 | { |
207 | return phys_to_dma(dev, virt_to_phys(pages)); | 207 | return (dma_addr_t)virt_to_phys(pages); |
208 | } | 208 | } |
209 | 209 | ||
210 | static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, | 210 | static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, |
@@ -223,10 +223,10 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, | |||
223 | goto out_free; | 223 | goto out_free; |
224 | /* | 224 | /* |
225 | * We depend on the IOMMU being able to work with any physical | 225 | * We depend on the IOMMU being able to work with any physical |
226 | * address directly, so if the DMA layer suggests it can't by | 226 | * address directly, so if the DMA layer suggests otherwise by |
227 | * giving us back some translation, that bodes very badly... | 227 | * translating or truncating them, that bodes very badly... |
228 | */ | 228 | */ |
229 | if (dma != __arm_lpae_dma_addr(dev, pages)) | 229 | if (dma != virt_to_phys(pages)) |
230 | goto out_unmap; | 230 | goto out_unmap; |
231 | } | 231 | } |
232 | 232 | ||
@@ -243,10 +243,8 @@ out_free: | |||
243 | static void __arm_lpae_free_pages(void *pages, size_t size, | 243 | static void __arm_lpae_free_pages(void *pages, size_t size, |
244 | struct io_pgtable_cfg *cfg) | 244 | struct io_pgtable_cfg *cfg) |
245 | { | 245 | { |
246 | struct device *dev = cfg->iommu_dev; | ||
247 | |||
248 | if (!selftest_running) | 246 | if (!selftest_running) |
249 | dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages), | 247 | dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), |
250 | size, DMA_TO_DEVICE); | 248 | size, DMA_TO_DEVICE); |
251 | free_pages_exact(pages, size); | 249 | free_pages_exact(pages, size); |
252 | } | 250 | } |
@@ -254,12 +252,11 @@ static void __arm_lpae_free_pages(void *pages, size_t size, | |||
254 | static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, | 252 | static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, |
255 | struct io_pgtable_cfg *cfg) | 253 | struct io_pgtable_cfg *cfg) |
256 | { | 254 | { |
257 | struct device *dev = cfg->iommu_dev; | ||
258 | |||
259 | *ptep = pte; | 255 | *ptep = pte; |
260 | 256 | ||
261 | if (!selftest_running) | 257 | if (!selftest_running) |
262 | dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep), | 258 | dma_sync_single_for_device(cfg->iommu_dev, |
259 | __arm_lpae_dma_addr(ptep), | ||
263 | sizeof(pte), DMA_TO_DEVICE); | 260 | sizeof(pte), DMA_TO_DEVICE); |
264 | } | 261 | } |
265 | 262 | ||
@@ -629,6 +626,11 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) | |||
629 | if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) | 626 | if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) |
630 | return NULL; | 627 | return NULL; |
631 | 628 | ||
629 | if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) { | ||
630 | dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n"); | ||
631 | return NULL; | ||
632 | } | ||
633 | |||
632 | data = kmalloc(sizeof(*data), GFP_KERNEL); | 634 | data = kmalloc(sizeof(*data), GFP_KERNEL); |
633 | if (!data) | 635 | if (!data) |
634 | return NULL; | 636 | return NULL; |