author		Joerg Roedel <joerg.roedel@amd.com>	2009-05-12 06:17:38 -0400
committer	Joerg Roedel <joerg.roedel@amd.com>	2009-05-28 12:13:43 -0400
commit		53812c115cda1f660b286c939669154a56976f6b (patch)
tree		3b130ac9ad18970553b904f69bddae722f60b9dc /arch/x86/kernel/amd_iommu.c
parent		8bda3092bcfa68f786d94549ae026e8db1eff041 (diff)
amd-iommu: handle page table allocation failures in dma_ops code
This error handling will be required when the aperture size increases
dynamically in the extended address allocator.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--	arch/x86/kernel/amd_iommu.c | 20
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index ded79f7747c5..a467addb44b7 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1193,6 +1193,8 @@ static dma_addr_t dma_ops_domain_map(struct amd_iommu *iommu,
 	paddr &= PAGE_MASK;
 
 	pte = dma_ops_get_pte(dom, address);
+	if (!pte)
+		return bad_dma_address;
 
 	__pte = paddr | IOMMU_PTE_P | IOMMU_PTE_FC;
 
@@ -1248,7 +1250,7 @@ static dma_addr_t __map_single(struct device *dev,
 			       u64 dma_mask)
 {
 	dma_addr_t offset = paddr & ~PAGE_MASK;
-	dma_addr_t address, start;
+	dma_addr_t address, start, ret;
 	unsigned int pages;
 	unsigned long align_mask = 0;
 	int i;
@@ -1271,7 +1273,10 @@ static dma_addr_t __map_single(struct device *dev,
 
 	start = address;
 	for (i = 0; i < pages; ++i) {
-		dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
+		ret = dma_ops_domain_map(iommu, dma_dom, start, paddr, dir);
+		if (ret == bad_dma_address)
+			goto out_unmap;
+
 		paddr += PAGE_SIZE;
 		start += PAGE_SIZE;
 	}
@@ -1287,6 +1292,17 @@ static dma_addr_t __map_single(struct device *dev,
 
 out:
 	return address;
+
+out_unmap:
+
+	for (--i; i >= 0; --i) {
+		start -= PAGE_SIZE;
+		dma_ops_domain_unmap(iommu, dma_dom, start);
+	}
+
+	dma_ops_free_addresses(dma_dom, address, pages);
+
+	return bad_dma_address;
 }
 
 /*