Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')

 arch/x86/kernel/amd_iommu.c | 54 +++++++++++++++++--------------
 1 file changed, 26 insertions(+), 28 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index d434a97a4c7f..2e2da717b350 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -188,6 +188,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	ret = __iommu_queue_command(iommu, cmd);
+	if (!ret)
+		iommu->need_sync = 1;
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return ret;
@@ -211,10 +213,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
 	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
-	iommu->need_sync = 0;
-
 	spin_lock_irqsave(&iommu->lock, flags);
 
+	if (!iommu->need_sync)
+		goto out;
+
+	iommu->need_sync = 0;
+
 	ret = __iommu_queue_command(iommu, &cmd);
 
 	if (ret)
@@ -231,8 +236,9 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
 	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-	if (unlikely((i == EXIT_LOOP_COUNT) && printk_ratelimit()))
-		printk(KERN_WARNING "AMD IOMMU: Completion wait loop failed\n");
+	if (unlikely(i == EXIT_LOOP_COUNT))
+		panic("AMD IOMMU: Completion wait loop failed\n");
+
 out:
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
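
Taken together, the two hunks above move both ends of the need_sync handshake under iommu->lock: iommu_queue_command() now raises the flag only after a successful __iommu_queue_command(), and iommu_completion_wait() tests and clears it while holding the same lock, bailing out early when there is nothing to sync. Below is a minimal user-space model of that protocol; it is a hypothetical sketch with a pthread mutex standing in for spin_lock_irqsave() and stub bodies in place of the real command queue and MMIO polling, not driver code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int need_sync;                   /* only touched with `lock` held */

static int queue_command(void)
{
        int ret = 0;                    /* stands in for __iommu_queue_command() */

        pthread_mutex_lock(&lock);
        if (!ret)
                need_sync = 1;          /* set under the same lock as the queue */
        pthread_mutex_unlock(&lock);
        return ret;
}

static void completion_wait(void)
{
        pthread_mutex_lock(&lock);
        if (!need_sync)                 /* tested under the lock: a concurrent */
                goto out;               /* queue_command() cannot slip between */
        need_sync = 0;                  /* the test and the clear */

        /* ... the driver queues a COMPL_WAIT command and polls MMIO here ... */
out:
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        queue_command();
        completion_wait();              /* cheap no-op when nothing is queued */
        puts("synced");
        return 0;
}
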
@@ -255,8 +261,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -282,8 +286,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -344,7 +346,7 @@ static int iommu_map(struct protection_domain *dom,
 	u64 __pte, *pte, *page;
 
 	bus_addr  = PAGE_ALIGN(bus_addr);
-	phys_addr = PAGE_ALIGN(bus_addr);
+	phys_addr = PAGE_ALIGN(phys_addr);
 
 	/* only support 512GB address spaces for now */
 	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
@@ -600,7 +602,7 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
 			continue;
 
 		p2 = IOMMU_PTE_PAGE(p1[i]);
-		for (j = 0; j < 512; ++i) {
+		for (j = 0; j < 512; ++j) {
 			if (!IOMMU_PTE_PRESENT(p2[j]))
 				continue;
 			p3 = IOMMU_PTE_PAGE(p2[j]);
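
The one-character fix above matters more than it looks: with ++i in the loop header, j was never advanced, so the inner walk over the L2 table never terminated while i ran off past the end of p1. A stand-alone sketch of the corrected nested walk follows; ENTRIES and the visit counter are illustrative stand-ins for the IOMMU_PTE_PAGE()/IOMMU_PTE_PRESENT() machinery, not the kernel's.

#include <stdio.h>

#define ENTRIES 512                     /* entries per page-table level */

int main(void)
{
        long visited = 0;

        for (int i = 0; i < ENTRIES; ++i) {
                /* p2 = IOMMU_PTE_PAGE(p1[i]) would be fetched here */
                for (int j = 0; j < ENTRIES; ++j)   /* ++j: with the old ++i,
                                                       j never advanced and
                                                       this loop never ended */
                        ++visited;
        }
        printf("visited %ld L2 entries (expect %d)\n",
               visited, ENTRIES * ENTRIES);
        return 0;
}
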
@@ -763,8 +765,6 @@ static void set_device_domain(struct amd_iommu *iommu,
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	iommu_queue_inv_dev_entry(iommu, devid);
-
-	iommu->need_sync = 1;
 }
 
 /*****************************************************************************
@@ -859,6 +859,9 @@ static int get_device_resources(struct device *dev,
 		print_devid(_bdf, 1);
 	}
 
+	if (domain_for_device(_bdf) == NULL)
+		set_device_domain(*iommu, *domain, _bdf);
+
 	return 1;
 }
 
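
This hunk makes get_device_resources() bind a device to its protection domain lazily, on the first DMA operation, when no domain was assigned yet. A hypothetical stand-alone model of that lazy-attach pattern is below; domain_table and the simplified domain_for_device()/set_device_domain() bodies are stand-ins keyed by the 16-bit PCI devid, not the kernel implementations.

#include <stdio.h>
#include <stdint.h>

#define MAX_DEVID 0x10000

static void *domain_table[MAX_DEVID];   /* amd_iommu_pd_table analogue */

static void *domain_for_device(uint16_t devid)
{
        return domain_table[devid];
}

static void set_device_domain(void *dom, uint16_t devid)
{
        /* the driver also updates the device table entry and queues an
           INVALIDATE_DEV_ENTRY command here */
        domain_table[devid] = dom;
}

static void *get_device_resources(uint16_t devid, void *default_dom)
{
        if (domain_for_device(devid) == NULL)   /* first DMA from this device */
                set_device_domain(default_dom, devid);
        return domain_for_device(devid);
}

int main(void)
{
        int dom;                                /* placeholder domain object */

        void *d = get_device_resources(0x0210, &dom);   /* bus 02, dev 02.0 */
        printf("attached: %s\n", d == &dom ? "yes" : "no");
        return 0;
}
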
@@ -909,7 +912,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 	if (address >= dom->aperture_size)
 		return;
 
-	WARN_ON(address & 0xfffULL || address > dom->aperture_size);
+	WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);
 
 	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
 	pte += IOMMU_PTE_L0_INDEX(address);
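
The old assertion had two independent flaws: the hard-coded 0xfffULL assumed 4 KiB pages where ~PAGE_MASK follows the configured page size, and > let an address exactly at aperture_size escape the warning. A quick stand-alone check of the corrected predicate (the PAGE_* macros are redefined locally and the aperture size is illustrative):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long aperture_size = 1UL << 25;        /* 32 MiB, illustrative */
        unsigned long addrs[] = { 0x1000, 0x1234, aperture_size };

        for (int k = 0; k < 3; ++k) {
                unsigned long a = addrs[k];
                /* unaligned 0x1234 and the end-of-aperture address both trip
                   the new check; the old `> aperture_size` missed the latter */
                int bad = (a & ~PAGE_MASK) || a >= aperture_size;

                printf("%#10lx -> %s\n", a, bad ? "WARN" : "ok");
        }
        return 0;
}
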
@@ -921,8 +924,8 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 
 /*
  * This function contains common code for mapping of a physically
- * contiguous memory region into DMA address space. It is uses by all
- * mapping functions provided by this IOMMU driver.
+ * contiguous memory region into DMA address space. It is used by all
+ * mapping functions provided with this IOMMU driver.
  * Must be called with the domain lock held.
  */
 static dma_addr_t __map_single(struct device *dev,
@@ -982,7 +985,8 @@ static void __unmap_single(struct amd_iommu *iommu,
 	dma_addr_t i, start;
 	unsigned int pages;
 
-	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
+	if ((dma_addr == bad_dma_address) ||
+	    (dma_addr + size > dma_dom->aperture_size))
 		return;
 
 	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
@@ -1032,8 +1036,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 	if (addr == bad_dma_address)
 		goto out;
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1061,8 +1064,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1128,8 +1130,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 		goto unmap;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1174,8 +1175,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		s->dma_address = s->dma_length = 0;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1226,8 +1226,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 		goto out;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1258,8 +1257,7 @@ static void free_coherent(struct device *dev, size_t size,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 