author		Joerg Roedel <joerg.roedel@amd.com>	2008-12-02 11:49:42 -0500
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-01-03 08:11:54 -0500
commit		5b28df6f43ac9878f310ad0cb7f11ddb262a7ac6 (patch)
tree		9045d718c721886b90e7de98b572881f2482cdc1 /arch
parent		9fdb19d64c0247f23343b51fc85f438f8e7a2f3c (diff)
AMD IOMMU: add checks for dma_ops domain to dma_ops functions
Impact: detect when a driver uses a device assigned otherwise
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
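
The patch applies one and the same guard to every dma_ops callback in this file: before doing any work, each callback now checks whether the protection domain attached to the device really is a dma_ops domain. If it is not, the device has been claimed for something else (device assignment to a guest, presumably), and the callback fails fast instead of walking page tables it does not own. A condensed sketch of the recurring pattern; only the failure value differs per callback, as the hunks below show:

	if (!dma_ops_domain(domain))
		return bad_dma_address;	/* or 0, NULL, or a bare return */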
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/amd_iommu.c	41
1 file changed, 35 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index bb28e2cda711..5c465c91150e 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -792,6 +792,15 @@ free_dma_dom:
 }
 
 /*
+ * little helper function to check whether a given protection domain is a
+ * dma_ops domain
+ */
+static bool dma_ops_domain(struct protection_domain *domain)
+{
+	return domain->flags & PD_DMA_OPS_MASK;
+}
+
+/*
  * Find out the protection domain structure for a given PCI device. This
  * will give us the pointer to the page table root for example.
  */
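
dma_ops_domain() itself is just a flag test on the generic protection domain. For reference, a sketch of the definitions it leans on, as they look in this era's amd_iommu_types.h; the field list is abbreviated and the exact layout should be treated as an assumption:

	#define PD_DMA_OPS_MASK	(1UL << 0)	/* domain used for dma_ops */

	struct protection_domain {
		spinlock_t	lock;		/* mostly used to lock the page table */
		u16		id;		/* domain id written to the device table */
		int		mode;		/* paging mode (number of levels) */
		u64		*pt_root;	/* page table root pointer */
		unsigned long	flags;		/* domain type; PD_DMA_OPS_MASK lives here */
		void		*priv;		/* for dma_ops domains: struct dma_ops_domain */
	};

Domains created by the driver's own dma_ops allocator carry PD_DMA_OPS_MASK in flags; domains set up for device assignment do not, which is exactly the distinction the new checks rely on.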
@@ -1096,6 +1105,9 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
 		/* device not handled by any AMD IOMMU */
 		return (dma_addr_t)paddr;
 
+	if (!dma_ops_domain(domain))
+		return bad_dma_address;
+
 	spin_lock_irqsave(&domain->lock, flags);
 	addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
 			    dma_mask);
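
map_single() reports the new failure through bad_dma_address, the error cookie the DMA API already checks for, so from a driver's perspective nothing new is required; the existing error check now catches the misuse. A hypothetical caller (dev, buf and len are illustrative):

	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;	/* bad_dma_address surfaces here */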
@@ -1126,6 +1138,9 @@ static void unmap_single(struct device *dev, dma_addr_t dma_addr,
 		/* device not handled by any AMD IOMMU */
 		return;
 
+	if (!dma_ops_domain(domain))
+		return;
+
 	spin_lock_irqsave(&domain->lock, flags);
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, dir);
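
unmap_single() (and unmap_sg() below) are void in the dma_ops interface, so there is no channel to report the problem to the caller; the only safe reaction is to return before touching any page tables. The caller-side idiom is unaffected (handle and len as in the mapping example above):

	/* unmap never reports failure; the guard merely turns it into a no-op */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);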
@@ -1180,6 +1195,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 	if (!iommu || !domain)
 		return map_sg_no_iommu(dev, sglist, nelems, dir);
 
+	if (!dma_ops_domain(domain))
+		return 0;
+
 	spin_lock_irqsave(&domain->lock, flags);
 
 	for_each_sg(sglist, s, nelems, i) {
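
For the scatter-gather path the DMA API's failure convention is a mapped-entry count of 0, so that is what the guard returns. A hypothetical caller (sglist and nents are illustrative):

	int mapped = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
	if (mapped == 0)
		return -ENOMEM;	/* nothing was mapped */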
@@ -1233,6 +1251,9 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 	    !get_device_resources(dev, &iommu, &domain, &devid))
 		return;
 
+	if (!dma_ops_domain(domain))
+		return;
+
 	spin_lock_irqsave(&domain->lock, flags);
 
 	for_each_sg(sglist, s, nelems, i) {
@@ -1278,6 +1299,9 @@ static void *alloc_coherent(struct device *dev, size_t size,
 		return virt_addr;
 	}
 
+	if (!dma_ops_domain(domain))
+		goto out_free;
+
 	if (!dma_mask)
 		dma_mask = *dev->dma_mask;
 
@@ -1286,18 +1310,20 @@ static void *alloc_coherent(struct device *dev, size_t size,
 	*dma_addr = __map_single(dev, iommu, domain->priv, paddr,
 				 size, DMA_BIDIRECTIONAL, true, dma_mask);
 
-	if (*dma_addr == bad_dma_address) {
-		free_pages((unsigned long)virt_addr, get_order(size));
-		virt_addr = NULL;
-		goto out;
-	}
+	if (*dma_addr == bad_dma_address)
+		goto out_free;
 
 	iommu_completion_wait(iommu);
 
-out:
 	spin_unlock_irqrestore(&domain->lock, flags);
 
 	return virt_addr;
+
+out_free:
+
+	free_pages((unsigned long)virt_addr, get_order(size));
+
+	return NULL;
 }
 
 /*
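
Besides adding the guard, this hunk restructures alloc_coherent()'s error handling around a single out_free label, so both failure cases (wrong domain type and an exhausted address space) share one cleanup path that frees the pages and returns NULL. One thing worth flagging: the bad_dma_address goto is taken with domain->lock still held, and out_free never drops it, which looks like a lock leak introduced here. Driver side, the failure mode stays the usual one (dev, size and handle are illustrative):

	dma_addr_t handle;
	void *cpu_addr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;	/* covers the new domain-type failure too */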
@@ -1319,6 +1345,9 @@ static void free_coherent(struct device *dev, size_t size,
 	if (!iommu || !domain)
 		goto free_mem;
 
+	if (!dma_ops_domain(domain))
+		goto free_mem;
+
 	spin_lock_irqsave(&domain->lock, flags);
 
 	__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
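
free_coherent() takes the mirror route: with no dma_ops domain there is no IOMMU mapping to tear down, so it jumps straight to free_mem and still releases the pages. The matching driver-side call, continuing the hypothetical example above:

	/* releases the mapping (if any) together with the pages */
	dma_free_coherent(dev, size, cpu_addr, handle);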