Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
 arch/x86/kernel/amd_iommu.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index f3dadb571d9b..f854d89b7edf 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -118,7 +118,7 @@ static bool check_device(struct device *dev)
 		return false;
 
 	/* No device or no PCI device */
-	if (!dev || dev->bus != &pci_bus_type)
+	if (dev->bus != &pci_bus_type)
 		return false;
 
 	devid = get_device_id(dev);
@@ -392,6 +392,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 	u32 tail, head;
 	u8 *target;
 
+	WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
 	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 	target = iommu->cmd_buf + tail;
 	memcpy_toio(target, cmd, sizeof(*cmd));
@@ -2186,7 +2187,7 @@ static void prealloc_protection_domains(void)
 	struct dma_ops_domain *dma_dom;
 	u16 devid;
 
-	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+	for_each_pci_dev(dev) {
 
 		/* Do we handle this device? */
 		if (!check_device(&dev->dev))
@@ -2298,7 +2299,7 @@ static void cleanup_domain(struct protection_domain *domain)
 	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
 		struct device *dev = dev_data->dev;
 
-		do_detach(dev);
+		__detach_device(dev);
 		atomic_set(&dev_data->bind, 0);
 	}
 
@@ -2327,6 +2328,7 @@ static struct protection_domain *protection_domain_alloc(void)
 		return NULL;
 
 	spin_lock_init(&domain->lock);
+	mutex_init(&domain->api_lock);
 	domain->id = domain_id_alloc();
 	if (!domain->id)
 		goto out_err;
@@ -2379,9 +2381,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 
 	free_pagetable(domain);
 
-	domain_id_free(domain->id);
-
-	kfree(domain);
+	protection_domain_free(domain);
 
 	dom->priv = NULL;
 }
@@ -2456,6 +2456,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
 	iova  &= PAGE_MASK;
 	paddr &= PAGE_MASK;
 
+	mutex_lock(&domain->api_lock);
+
 	for (i = 0; i < npages; ++i) {
 		ret = iommu_map_page(domain, iova, paddr, prot, PM_MAP_4k);
 		if (ret)
@@ -2465,6 +2467,8 @@ static int amd_iommu_map_range(struct iommu_domain *dom,
 		paddr += PAGE_SIZE;
 	}
 
+	mutex_unlock(&domain->api_lock);
+
 	return 0;
 }
 
@@ -2477,12 +2481,16 @@ static void amd_iommu_unmap_range(struct iommu_domain *dom,
 
 	iova  &= PAGE_MASK;
 
+	mutex_lock(&domain->api_lock);
+
 	for (i = 0; i < npages; ++i) {
 		iommu_unmap_page(domain, iova, PM_MAP_4k);
 		iova  += PAGE_SIZE;
 	}
 
 	iommu_flush_tlb_pde(domain);
+
+	mutex_unlock(&domain->api_lock);
 }
 
 static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
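
As background on the locking change above: the new per-domain api_lock simply serializes amd_iommu_map_range() and amd_iommu_unmap_range() so concurrent callers cannot interleave page-table updates on the same protection domain. Below is a minimal user-space sketch of that pattern, with a pthread mutex standing in for the kernel mutex; every name other than api_lock is illustrative, not the kernel's actual API.

/*
 * Sketch of the serialization pattern introduced by the api_lock change:
 * one mutex per domain, held across the whole map loop.  User-space
 * illustration only; everything except the api_lock name is hypothetical.
 */
#include <pthread.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

struct protection_domain {
	pthread_mutex_t api_lock;	/* serializes map/unmap on this domain */
};

/* Stand-in for iommu_map_page(); the real code edits IOMMU page tables. */
static int map_one_page(struct protection_domain *dom,
			unsigned long iova, unsigned long paddr)
{
	(void)dom; (void)iova; (void)paddr;
	return 0;
}

static int map_range(struct protection_domain *dom, unsigned long iova,
		     unsigned long paddr, size_t npages)
{
	int ret = 0;
	size_t i;

	iova  &= PAGE_MASK;
	paddr &= PAGE_MASK;

	/*
	 * Hold the lock for the whole range so a concurrent unmap of the
	 * same domain cannot observe a half-updated mapping.
	 */
	pthread_mutex_lock(&dom->api_lock);
	for (i = 0; i < npages; ++i) {
		ret = map_one_page(dom, iova, paddr);
		if (ret)
			break;
		iova  += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}
	pthread_mutex_unlock(&dom->api_lock);

	return ret;
}

int main(void)
{
	struct protection_domain dom;

	pthread_mutex_init(&dom.api_lock, NULL);
	return map_range(&dom, 0x100000, 0x200000, 16);
}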