author     FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>	2009-11-17 17:44:35 -0500
committer  Tony Luck <tony.luck@intel.com>	2009-12-14 19:38:43 -0500
commit     e2a465675dc089e9a56ba2fa2a5fbd9bd8844d18
tree       aeeb04a79d8b3fef52ca900bd0fcc7bdab26e51c
parent     9ee27c76393394c7fb1ddeca3f1622d4537185a0
[IA64] fix SBA IOMMU to handle allocation failure properly
It's possible that the SBA IOMMU fails to find free I/O space under heavy
I/O load. The SBA IOMMU currently panics on allocation failure, but it
shouldn't: drivers can handle the failure, and the majority of other
IOMMU drivers don't panic on allocation failure.

This patch fixes the SBA IOMMU paths to handle allocation failure properly.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
 arch/ia64/hp/common/sba_iommu.c | 38 +++++++++++++++++++++++++++++---------
 1 file changed, 29 insertions(+), 9 deletions(-)
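The recovery the commit message refers to is the standard DMA API contract: a mapping call reports failure through its return value, and the driver tests it with dma_mapping_error() instead of the machine dying inside the IOMMU. A minimal driver-side sketch of that pattern (the function and its error policy are illustrative, not part of this patch):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hypothetical driver fragment: map one page for DMA and back off
 * when the IOMMU is out of I/O space instead of crashing the box.
 */
static int example_map_one(struct device *dev, struct page *page,
			   size_t len, dma_addr_t *handle)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;		/* caller can retry or drop the request */

	*handle = addr;
	return 0;
}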
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index f332e3fe4237..e14c492a8a93 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -677,12 +677,19 @@ sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 			spin_unlock_irqrestore(&ioc->saved_lock, flags);
 
 			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
-			if (unlikely(pide >= (ioc->res_size << 3)))
-				panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
-				      ioc->ioc_hpa);
+			if (unlikely(pide >= (ioc->res_size << 3))) {
+				printk(KERN_WARNING "%s: I/O MMU @ %p is "
+				       "out of mapping resources, %u %u %lx\n",
+				       __func__, ioc->ioc_hpa, ioc->res_size,
+				       pages_needed, dma_get_seg_boundary(dev));
+				return -1;
+			}
 #else
-		panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
-		      ioc->ioc_hpa);
+		printk(KERN_WARNING "%s: I/O MMU @ %p is "
+		       "out of mapping resources, %u %u %lx\n",
+		       __func__, ioc->ioc_hpa, ioc->res_size,
+		       pages_needed, dma_get_seg_boundary(dev));
+		return -1;
 #endif
 	}
 }
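For reference, the exhaustion test works because ioc->res_size is the size of the resource bitmap in bytes, so res_size << 3 is the number of valid bits, one per IOVA page; sba_search_bitmap() signals failure by returning an index at or past that count. A standalone toy model of the same check, with made-up numbers:

#include <stdio.h>

/*
 * Toy model of the exhaustion check above: the bitmap is res_size
 * bytes, i.e. res_size << 3 bits.  A failed search reports an index
 * past the last valid bit.  (Illustrative values, not driver code.)
 */
int main(void)
{
	unsigned int res_size = 1024;		/* bitmap size in bytes (made up) */
	unsigned int nbits = res_size << 3;	/* 8192 mappable IOVA pages */
	unsigned int pide = nbits;		/* search failed: index past last bit */

	if (pide >= nbits)
		printf("out of mapping resources: return -1, don't panic\n");
	return 0;
}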
@@ -965,6 +972,8 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
 #endif
 
 	pide = sba_alloc_range(ioc, dev, size);
+	if (pide < 0)
+		return 0;
 
 	iovp = (dma_addr_t) pide << iovp_shift;
 
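One subtlety in this hunk: the pide < 0 test can only see the new -1 return if pide has a signed type in sba_map_page(), which the patch assumes; with an unsigned pide, -1 would wrap to a huge positive index and the check would never fire. A standalone illustration of that pitfall:

#include <stdio.h>

int main(void)
{
	int spide = -1;			/* signed: the failure value is visible */
	unsigned int upide = -1;	/* unsigned: -1 wraps to 0xffffffff */

	if (spide < 0)
		printf("signed pide catches the -1 failure return\n");
	if (upide > 0x7fffffff)
		printf("unsigned pide wrapped to a huge bogus index\n");
	return 0;
}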
@@ -1320,6 +1329,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
 	int n_mappings = 0;
 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
+	int idx;
 
 	while (nents > 0) {
 		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
@@ -1418,16 +1428,22 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 		vcontig_sg->dma_length = vcontig_len;
 		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
 		ASSERT(dma_len <= DMA_CHUNK_SIZE);
-		dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
-			| (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
-			| dma_offset);
+		idx = sba_alloc_range(ioc, dev, dma_len);
+		if (idx < 0) {
+			dma_sg->dma_length = 0;
+			return -1;
+		}
+		dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
+						   | dma_offset);
 		n_mappings++;
 	}
 
 	return n_mappings;
 }
 
-
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			       int nents, enum dma_data_direction dir,
+			       struct dma_attrs *attrs);
 /**
  * sba_map_sg - map Scatter/Gather list
  * @dev: instance of PCI owned by the driver that's asking.
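The forward declaration is needed because the next hunk makes sba_map_sg_attrs() call sba_unmap_sg_attrs() to roll back a partially built list, and sba_unmap_sg_attrs() is defined further down in the file.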
@@ -1493,6 +1509,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
 	** Access to the virtual address is what forces a two pass algorithm.
 	*/
 	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
+	if (coalesced < 0) {
+		sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
+		return 0;
+	}
 
 	/*
 	** Program the I/O Pdir
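With this last hunk a failed coalesce unwinds any partial mappings and makes sba_map_sg_attrs() return 0 mapped entries, which is the failure value the generic scatter/gather API already defines. A minimal sketch of the corresponding driver-side check (hypothetical function, using the 2.6.32-era DMA API):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/*
 * Hypothetical driver fragment: dma_map_sg() returns the number of
 * DMA segments, or 0 on failure.  After this patch an exhausted SBA
 * IOMMU takes the 0 path instead of panicking the machine.
 */
static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);

	if (mapped == 0)
		return -ENOMEM;		/* requeue the request and retry later */

	/* ... program the hardware with the 'mapped' segments ... */
	return mapped;
}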