author     Jens Axboe <jens.axboe@oracle.com>   2007-10-12 07:44:12 -0400
committer  Jens Axboe <jens.axboe@oracle.com>   2007-10-16 05:27:32 -0400
commit     78bdc3106a877cfa50439fa66b52acbc4e7868df
tree       668b1f22ee9e645e76589ce103b42829f999822e   /arch/powerpc/kernel/iommu.c
parent     d1ed455e30e439e0d1483c2e236d7e15e1010704
PPC: sg chaining support
This updates the ppc iommu/pci dma mappers to support sg chaining. Includes
further fixes from FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
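
For context: with sg chaining, a scatterlist is no longer one flat array but a
set of array segments linked together through special chain entries, so code
must advance through it with sg_next()/for_each_sg() rather than plain pointer
arithmetic (s++), which would step onto a chain entry instead of following the
link. A simplified sketch of those helpers, close to (but not verbatim from)
the <linux/scatterlist.h> of this era:

	#include <linux/scatterlist.h>

	/*
	 * Sketch only: debug checks and the end-of-list test are omitted.
	 * A chain entry is marked in the low bits of its page pointer;
	 * sg_next() detects it and jumps to the segment it points at.
	 */
	static inline struct scatterlist *sg_next_sketch(struct scatterlist *sg)
	{
		sg++;				/* next slot in the current array */
		if (unlikely(sg_is_chain(sg)))	/* chain entry, not a real segment */
			sg = sg_chain_ptr(sg);	/* follow the link */
		return sg;
	}

	/* for_each_sg() is essentially a for loop built on sg_next(): */
	#define for_each_sg_sketch(sglist, sg, nr, __i) \
		for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
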
Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r--   arch/powerpc/kernel/iommu.c | 23 ++++++++++++++---------
1 file changed, 14 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index e4ec6eee81a8..306a6f75b6c5 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -277,7 +277,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned long flags;
 	struct scatterlist *s, *outs, *segstart;
-	int outcount, incount;
+	int outcount, incount, i;
 	unsigned long handle;
 
 	BUG_ON(direction == DMA_NONE);
@@ -297,7 +297,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
-	for (s = outs; nelems; nelems--, s++) {
+	for_each_sg(sglist, s, nelems, i) {
 		unsigned long vaddr, npages, entry, slen;
 
 		slen = s->length;
@@ -341,7 +341,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		if (novmerge || (dma_addr != dma_next)) {
 			/* Can't merge: create a new segment */
 			segstart = s;
-			outcount++; outs++;
+			outcount++;
+			outs = sg_next(outs);
 			DBG(" can't merge, new segment.\n");
 		} else {
 			outs->dma_length += s->length;
@@ -374,7 +375,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	 * next entry of the sglist if we didn't fill the list completely
 	 */
 	if (outcount < incount) {
-		outs++;
+		outs = sg_next(outs);
 		outs->dma_address = DMA_ERROR_CODE;
 		outs->dma_length = 0;
 	}
@@ -385,7 +386,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 	return outcount;
 
  failure:
-	for (s = &sglist[0]; s <= outs; s++) {
+	for_each_sg(sglist, s, nelems, i) {
 		if (s->dma_length != 0) {
 			unsigned long vaddr, npages;
 
@@ -395,6 +396,8 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
 		}
+		if (s == outs)
+			break;
 	}
 	spin_unlock_irqrestore(&(tbl->it_lock), flags);
 	return 0;
@@ -404,6 +407,7 @@ int iommu_map_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		int nelems, enum dma_data_direction direction)
 {
+	struct scatterlist *sg;
 	unsigned long flags;
 
 	BUG_ON(direction == DMA_NONE);
@@ -413,15 +417,16 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
 	spin_lock_irqsave(&(tbl->it_lock), flags);
 
+	sg = sglist;
 	while (nelems--) {
 		unsigned int npages;
-		dma_addr_t dma_handle = sglist->dma_address;
+		dma_addr_t dma_handle = sg->dma_address;
 
-		if (sglist->dma_length == 0)
+		if (sg->dma_length == 0)
 			break;
-		npages = iommu_num_pages(dma_handle,sglist->dma_length);
+		npages = iommu_num_pages(dma_handle, sg->dma_length);
 		__iommu_free(tbl, dma_handle, npages);
-		sglist++;
+		sg = sg_next(sg);
 	}
 
 	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
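
A note on the failure path above: the old cleanup loop bounded itself with the
pointer comparison s <= outs, which is meaningless once entries can live in
different array segments, so the new loop walks with for_each_sg() and breaks
explicitly when it reaches outs. The same conversion pattern applies to any
walker of a possibly-chained list. A hypothetical before/after illustration
(use_segment() is an invented placeholder, not a kernel function):

	#include <linux/scatterlist.h>

	/* Invented consumer, declared only for the example. */
	extern void use_segment(dma_addr_t addr, unsigned int len);

	/* Pre-chaining style: breaks if sglist contains a chain entry. */
	static void walk_mapped_broken(struct scatterlist *sglist, int nelems)
	{
		while (nelems--) {
			use_segment(sglist->dma_address, sglist->dma_length);
			sglist++;		/* wrong: may land on a chain entry */
		}
	}

	/* Chain-safe style, as the patch does in iommu_unmap_sg(). */
	static void walk_mapped_safe(struct scatterlist *sglist, int nelems)
	{
		struct scatterlist *sg = sglist;

		while (nelems--) {
			use_segment(sg->dma_address, sg->dma_length);
			sg = sg_next(sg);	/* follows chain links transparently */
		}
	}
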