Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 49
1 file changed, 23 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index e4899e0e8787..a7b6dec6fc3f 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -187,6 +187,8 @@ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd)
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	ret = __iommu_queue_command(iommu, cmd);
+	if (!ret)
+		iommu->need_sync = 1;
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	return ret;
@@ -210,10 +212,13 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
 	cmd.data[0] = CMD_COMPL_WAIT_INT_MASK;
 	CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT);
 
-	iommu->need_sync = 0;
-
 	spin_lock_irqsave(&iommu->lock, flags);
 
+	if (!iommu->need_sync)
+		goto out;
+
+	iommu->need_sync = 0;
+
 	ret = __iommu_queue_command(iommu, &cmd);
 
 	if (ret)
@@ -254,8 +259,6 @@ static int iommu_queue_inv_dev_entry(struct amd_iommu *iommu, u16 devid)
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -281,8 +284,6 @@ static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu,
 
 	ret = iommu_queue_command(iommu, &cmd);
 
-	iommu->need_sync = 1;
-
 	return ret;
 }
 
@@ -343,7 +344,7 @@ static int iommu_map(struct protection_domain *dom,
 	u64 __pte, *pte, *page;
 
 	bus_addr  = PAGE_ALIGN(bus_addr);
-	phys_addr = PAGE_ALIGN(bus_addr);
+	phys_addr = PAGE_ALIGN(phys_addr);
 
 	/* only support 512GB address spaces for now */
 	if (bus_addr > IOMMU_MAP_SIZE_L3 || !(prot & IOMMU_PROT_MASK))
@@ -599,7 +600,7 @@ static void dma_ops_free_pagetable(struct dma_ops_domain *dma_dom)
 			continue;
 
 		p2 = IOMMU_PTE_PAGE(p1[i]);
-		for (j = 0; j < 512; ++i) {
+		for (j = 0; j < 512; ++j) {
 			if (!IOMMU_PTE_PRESENT(p2[j]))
 				continue;
 			p3 = IOMMU_PTE_PAGE(p2[j]);
@@ -762,8 +763,6 @@ static void set_device_domain(struct amd_iommu *iommu,
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
 	iommu_queue_inv_dev_entry(iommu, devid);
-
-	iommu->need_sync = 1;
 }
 
 /*****************************************************************************
@@ -858,6 +857,9 @@ static int get_device_resources(struct device *dev,
 		print_devid(_bdf, 1);
 	}
 
+	if (domain_for_device(_bdf) == NULL)
+		set_device_domain(*iommu, *domain, _bdf);
+
 	return 1;
 }
 
@@ -908,7 +910,7 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 	if (address >= dom->aperture_size)
 		return;
 
-	WARN_ON(address & 0xfffULL || address > dom->aperture_size);
+	WARN_ON(address & ~PAGE_MASK || address >= dom->aperture_size);
 
 	pte  = dom->pte_pages[IOMMU_PTE_L1_INDEX(address)];
 	pte += IOMMU_PTE_L0_INDEX(address);
@@ -920,8 +922,8 @@ static void dma_ops_domain_unmap(struct amd_iommu *iommu,
 
 /*
  * This function contains common code for mapping of a physically
- * contiguous memory region into DMA address space. It is uses by all
- * mapping functions provided by this IOMMU driver.
+ * contiguous memory region into DMA address space. It is used by all
+ * mapping functions provided with this IOMMU driver.
  * Must be called with the domain lock held.
  */
 static dma_addr_t __map_single(struct device *dev,
@@ -981,7 +983,8 @@ static void __unmap_single(struct amd_iommu *iommu,
 	dma_addr_t i, start;
 	unsigned int pages;
 
-	if ((dma_addr == 0) || (dma_addr + size > dma_dom->aperture_size))
+	if ((dma_addr == bad_dma_address) ||
+	    (dma_addr + size > dma_dom->aperture_size))
 		return;
 
 	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
@@ -1031,8 +1034,7 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 	if (addr == bad_dma_address)
 		goto out;
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1060,8 +1062,7 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1127,8 +1128,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 		goto unmap;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1173,8 +1173,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		s->dma_address = s->dma_length = 0;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1225,8 +1224,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 		goto out;
 	}
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 out:
 	spin_unlock_irqrestore(&domain->lock, flags);
@@ -1257,8 +1255,7 @@ static void free_coherent(struct device *dev, size_t size,
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
 
-	if (unlikely(iommu->need_sync))
-		iommu_completion_wait(iommu);
+	iommu_completion_wait(iommu);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
 
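
For readers following the flag handling: the hunks above move all reads and writes of iommu->need_sync under iommu->lock, set the flag in iommu_queue_command() only when queueing succeeded, and make iommu_completion_wait() a no-op when nothing has been queued since the last sync. Below is a small stand-alone sketch of that protocol, not the driver source; all names here (fake_iommu, queue_command, completion_wait, the pthread mutex standing in for the spinlock) are illustrative. Compile with -pthread.

/*
 * Stand-alone sketch of the need_sync protocol established by this patch:
 * the queue path sets a dirty flag under the lock, the sync path tests and
 * clears it under the same lock and skips the wait when nothing was queued.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_iommu {
	pthread_mutex_t lock;	/* stands in for the iommu->lock spinlock */
	int need_sync;		/* dirty flag, only touched under the lock */
};

/* Stub for __iommu_queue_command(): pretend the command was queued. */
static int __queue_command(struct fake_iommu *iommu)
{
	(void)iommu;
	return 0;
}

/* Queue path: mark the queue dirty while still holding the lock. */
static int queue_command(struct fake_iommu *iommu)
{
	int ret;

	pthread_mutex_lock(&iommu->lock);
	ret = __queue_command(iommu);
	if (!ret)
		iommu->need_sync = 1;
	pthread_mutex_unlock(&iommu->lock);

	return ret;
}

/* Sync path: test and clear the flag under the same lock. */
static int completion_wait(struct fake_iommu *iommu)
{
	int ret = 0;

	pthread_mutex_lock(&iommu->lock);

	if (!iommu->need_sync)
		goto out;

	iommu->need_sync = 0;
	ret = __queue_command(iommu);		/* would queue CMD_COMPL_WAIT */
	printf("completion wait issued\n");	/* would poll for completion */
out:
	pthread_mutex_unlock(&iommu->lock);
	return ret;
}

int main(void)
{
	struct fake_iommu iommu = {
		.lock      = PTHREAD_MUTEX_INITIALIZER,
		.need_sync = 0,
	};

	completion_wait(&iommu);	/* no-op: nothing queued yet */
	queue_command(&iommu);
	completion_wait(&iommu);	/* prints once */
	return 0;
}

Because the flag is never touched outside the lock, the DMA-ops callers no longer need their own "if (unlikely(iommu->need_sync))" check; calling the sync path unconditionally is cheap when the queue is clean.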