diff options
author | FUJITA Tomonori <tomof@acm.org> | 2008-02-05 01:27:58 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2008-02-05 12:44:10 -0500 |
commit | a031bbcb8d7559d61f383880f23dd0e047247410 (patch) | |
tree | 7cc77c1ddd1e38835bf312bed11955fdd9fea6cf | |
parent | 740c3ce66700640a6e6136ff679b067e92125794 (diff) |
iommu sg merging: IA64: make sba_iommu respect the segment size limits
This patch makes sba iommu respect segment size limits when merging sg
lists.
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Jeff Garzik <jeff@garzik.org>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Acked-by: Jens Axboe <jens.axboe@oracle.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | arch/ia64/hp/common/sba_iommu.c | 8 |
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 45bf04eb7d70..c412fe63f8ec 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1265,7 +1265,7 @@ sba_fill_pdir(
1265 | * the sglist do both. | 1265 | * the sglist do both. |
1266 | */ | 1266 | */ |
1267 | static SBA_INLINE int | 1267 | static SBA_INLINE int |
1268 | sba_coalesce_chunks( struct ioc *ioc, | 1268 | sba_coalesce_chunks(struct ioc *ioc, struct device *dev, |
1269 | struct scatterlist *startsg, | 1269 | struct scatterlist *startsg, |
1270 | int nents) | 1270 | int nents) |
1271 | { | 1271 | { |
@@ -1275,6 +1275,7 @@ sba_coalesce_chunks( struct ioc *ioc,
1275 | struct scatterlist *dma_sg; /* next DMA stream head */ | 1275 | struct scatterlist *dma_sg; /* next DMA stream head */ |
1276 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ | 1276 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ |
1277 | int n_mappings = 0; | 1277 | int n_mappings = 0; |
1278 | unsigned int max_seg_size = dma_get_max_seg_size(dev); | ||
1278 | 1279 | ||
1279 | while (nents > 0) { | 1280 | while (nents > 0) { |
1280 | unsigned long vaddr = (unsigned long) sba_sg_address(startsg); | 1281 | unsigned long vaddr = (unsigned long) sba_sg_address(startsg); |
@@ -1314,6 +1315,9 @@ sba_coalesce_chunks( struct ioc *ioc,
1314 | > DMA_CHUNK_SIZE) | 1315 | > DMA_CHUNK_SIZE) |
1315 | break; | 1316 | break; |
1316 | 1317 | ||
1318 | if (dma_len + startsg->length > max_seg_size) | ||
1319 | break; | ||
1320 | |||
1317 | /* | 1321 | /* |
1318 | ** Then look for virtually contiguous blocks. | 1322 | ** Then look for virtually contiguous blocks. |
1319 | ** | 1323 | ** |
@@ -1441,7 +1445,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
1441 | ** w/o this association, we wouldn't have coherent DMA! | 1445 | ** w/o this association, we wouldn't have coherent DMA! |
1442 | ** Access to the virtual address is what forces a two pass algorithm. | 1446 | ** Access to the virtual address is what forces a two pass algorithm. |
1443 | */ | 1447 | */ |
1444 | coalesced = sba_coalesce_chunks(ioc, sglist, nents); | 1448 | coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents); |
1445 | 1449 | ||
1446 | /* | 1450 | /* |
1447 | ** Program the I/O Pdir | 1451 | ** Program the I/O Pdir |