Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--	arch/x86/kernel/amd_iommu.c	26
1 file changed, 18 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index d8da9988edd..fa5a1474cd1 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -18,8 +18,8 @@
  */
 
 #include <linux/pci.h>
-#include <linux/gfp.h>
 #include <linux/bitmap.h>
+#include <linux/slab.h>
 #include <linux/debugfs.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
@@ -118,7 +118,7 @@ static bool check_device(struct device *dev)
 		return false;
 
 	/* No device or no PCI device */
-	if (!dev || dev->bus != &pci_bus_type)
+	if (dev->bus != &pci_bus_type)
 		return false;
 
 	devid = get_device_id(dev);
@@ -392,6 +392,7 @@ static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 	u32 tail, head;
 	u8 *target;
 
+	WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED);
 	tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
 	target = iommu->cmd_buf + tail;
 	memcpy_toio(target, cmd, sizeof(*cmd));
@@ -2253,7 +2254,7 @@ static void prealloc_protection_domains(void)
 	struct dma_ops_domain *dma_dom;
 	u16 devid;
 
-	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+	for_each_pci_dev(dev) {
 
 		/* Do we handle this device? */
 		if (!check_device(&dev->dev))
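The for_each_pci_dev() helper from <linux/pci.h> is shorthand for exactly the pci_get_device() walk the old code spelled out by hand: pci_get_device() drops the reference it holds on the previous device and takes one on the next, so the loop stays reference-safe. A minimal sketch of the pattern, with handle_one_device() as a hypothetical placeholder for the per-device work:

#include <linux/pci.h>

static void walk_all_pci_devices(void)
{
	struct pci_dev *dev = NULL;

	/*
	 * for_each_pci_dev(dev) expands to roughly:
	 *   while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL)
	 * Each iteration hands back a referenced pci_dev and releases the
	 * previous one, so no explicit pci_dev_put() is needed inside the loop.
	 */
	for_each_pci_dev(dev) {
		/* handle_one_device(dev);  hypothetical per-device work */
	}
}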
@@ -2365,7 +2366,7 @@ static void cleanup_domain(struct protection_domain *domain)
 	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
 		struct device *dev = dev_data->dev;
 
-		do_detach(dev);
+		__detach_device(dev);
 		atomic_set(&dev_data->bind, 0);
 	}
 
@@ -2394,6 +2395,7 @@ static struct protection_domain *protection_domain_alloc(void)
 		return NULL;
 
 	spin_lock_init(&domain->lock);
+	mutex_init(&domain->api_lock);
 	domain->id = domain_id_alloc();
 	if (!domain->id)
 		goto out_err;
@@ -2446,9 +2448,7 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
 
 	free_pagetable(domain);
 
-	domain_id_free(domain->id);
-
-	kfree(domain);
+	protection_domain_free(domain);
 
 	dom->priv = NULL;
 }
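The two lines removed here show what the destroy path used to do by hand; protection_domain_free() presumably bundles that teardown so it mirrors protection_domain_alloc(). A rough sketch based only on the removed lines (an assumption, not the actual implementation, which may also unlink the domain from global lists):

static void protection_domain_free(struct protection_domain *domain)
{
	if (!domain)
		return;

	domain_id_free(domain->id);	/* return the domain id to the allocator */
	kfree(domain);			/* free the protection_domain itself */
}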
@@ -2512,13 +2512,18 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	unsigned long page_size = 0x1000UL << gfp_order;
 	struct protection_domain *domain = dom->priv;
 	int prot = 0;
+	int ret;
 
 	if (iommu_prot & IOMMU_READ)
 		prot |= IOMMU_PROT_IR;
 	if (iommu_prot & IOMMU_WRITE)
 		prot |= IOMMU_PROT_IW;
 
-	return iommu_map_page(domain, iova, paddr, prot, page_size);
+	mutex_lock(&domain->api_lock);
+	ret = iommu_map_page(domain, iova, paddr, prot, page_size);
+	mutex_unlock(&domain->api_lock);
+
+	return ret;
 }
 
 static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
@@ -2528,7 +2533,12 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
 	unsigned long page_size, unmap_size;
 
 	page_size = 0x1000UL << gfp_order;
+
+	mutex_lock(&domain->api_lock);
 	unmap_size = iommu_unmap_page(domain, iova, page_size);
+	mutex_unlock(&domain->api_lock);
+
+	iommu_flush_tlb_pde(domain);
 
 	return get_order(unmap_size);
 }
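Taken together, the last three hunks serialize the exported map and unmap paths per protection domain: the mutex initialized in protection_domain_alloc() is held around iommu_map_page()/iommu_unmap_page(), and the unmap side flushes the IOTLB afterwards. A hedged caller-side sketch of how these entry points are reached through the generic IOMMU API of this kernel generation (the order-based iommu_map()/iommu_unmap() prototypes and the example address are assumptions of this sketch, not part of the patch):

#include <linux/iommu.h>

/*
 * Map one 4 KiB page (gfp_order 0) into a domain and unmap it again.
 * Both calls land in amd_iommu_map()/amd_iommu_unmap() above, so two
 * threads doing this against the same domain serialize on api_lock.
 */
static int map_unmap_one_page(struct iommu_domain *dom, phys_addr_t paddr)
{
	unsigned long iova = 0x100000;	/* arbitrary example IO virtual address */
	int ret;

	ret = iommu_map(dom, iova, paddr, 0, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	iommu_unmap(dom, iova, 0);	/* the IOTLB is flushed in the unmap path */
	return 0;
}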