author		Jens Axboe <jens.axboe@oracle.com>	2007-08-07 03:37:10 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2007-10-16 05:27:32 -0400
commit		2c941a204070ab32d92d40318a3196a7fb994c00
tree		94dc01c168b8330ab0390faeb602728a82e64df6 /arch/sparc64/kernel/iommu.c
parent		0912a5db0ea45d8aef3ee99a882e093285e32c3c
SPARC64: sg chaining support
This updates the sparc64 iommu/pci dma mappers to use sg chaining.
Acked-by: David S. Miller <davem@davemloft.net>
Later updated to a newer kernel with unified sparc64 iommu sg handling.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
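
The heart of the conversion is the traversal idiom. A chained scatterlist is no longer one flat array, so pointer arithmetic (`sg++`, `sglist[i]`) can run off the end of a chunk instead of following the chain link into the next one; every walk has to go through sg_next()/for_each_sg() from <linux/scatterlist.h>. Below is a minimal sketch of the two styles (illustrative only, not code from this patch; use_entry() is a hypothetical helper):

```c
#include <linux/scatterlist.h>

/* Hypothetical per-entry handler, purely for illustration. */
static void use_entry(struct scatterlist *sg)
{
	/* e.g. inspect sg_phys(sg) and sg->length */
}

static void walk_sg(struct scatterlist *sglist, int nelems)
{
	struct scatterlist *sg;
	int i;

	/*
	 * Old style, safe only for a flat array of entries:
	 *
	 *	for (i = 0; i < nelems; i++)
	 *		use_entry(&sglist[i]);
	 *
	 * Chain-aware style used throughout this patch: for_each_sg()
	 * advances with sg_next(), which follows chain links rather
	 * than doing pointer arithmetic.
	 */
	for_each_sg(sglist, sg, nelems, i)
		use_entry(sg);
}
```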
Diffstat (limited to 'arch/sparc64/kernel/iommu.c')
-rw-r--r--	arch/sparc64/kernel/iommu.c	39
1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index b35a62167e9c..db3ffcf7a120 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -10,6 +10,7 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
+#include <linux/scatterlist.h>
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
@@ -480,7 +481,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                           unsigned long iopte_protection)
 {
         struct scatterlist *dma_sg = sg;
-        struct scatterlist *sg_end = sg + nelems;
+        struct scatterlist *sg_end = sg_last(sg, nelems);
         int i;
 
         for (i = 0; i < nused; i++) {
@@ -515,7 +516,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                                         len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                         break;
                                 }
-                                sg++;
+                                sg = sg_next(sg);
                         }
 
                         pteval = iopte_protection | (pteval & IOPTE_PAGE);
@@ -528,24 +529,24 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                         }
 
                         pteval = (pteval & IOPTE_PAGE) + len;
-                        sg++;
+                        sg = sg_next(sg);
 
                         /* Skip over any tail mappings we've fully mapped,
                          * adjusting pteval along the way.  Stop when we
                          * detect a page crossing event.
                          */
-                        while (sg < sg_end &&
+                        while (sg != sg_end &&
                                (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                                (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                                ((pteval ^
                                  (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                 pteval += sg->length;
-                                sg++;
+                                sg = sg_next(sg);
                         }
                         if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                 pteval = ~0UL;
                 } while (dma_npages != 0);
-                dma_sg++;
+                dma_sg = sg_next(dma_sg);
         }
 }
 
@@ -606,7 +607,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
         sgtmp = sglist;
         while (used && sgtmp->dma_length) {
                 sgtmp->dma_address += dma_base;
-                sgtmp++;
+                sgtmp = sg_next(sgtmp);
                 used--;
         }
         used = nelems - used;
@@ -642,6 +643,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
         struct strbuf *strbuf;
         iopte_t *base;
         unsigned long flags, ctx, i, npages;
+        struct scatterlist *sg, *sgprv;
         u32 bus_addr;
 
         if (unlikely(direction == DMA_NONE)) {
@@ -654,11 +656,14 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
         bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
-        for (i = 1; i < nelems; i++)
-                if (sglist[i].dma_length == 0)
+        sgprv = NULL;
+        for_each_sg(sglist, sg, nelems, i) {
+                if (sg->dma_length == 0)
                         break;
-        i--;
-        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+                sgprv = sg;
+        }
+
+        npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
                   bus_addr) >> IO_PAGE_SHIFT;
 
         base = iommu->page_table +
@@ -730,6 +735,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
         struct iommu *iommu;
         struct strbuf *strbuf;
         unsigned long flags, ctx, npages, i;
+        struct scatterlist *sg, *sgprv;
         u32 bus_addr;
 
         iommu = dev->archdata.iommu;
@@ -753,11 +759,14 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 
         /* Step 2: Kick data out of streaming buffers. */
         bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-        for(i = 1; i < nelems; i++)
-                if (!sglist[i].dma_length)
+        sgprv = NULL;
+        for_each_sg(sglist, sg, nelems, i) {
+                if (sg->dma_length == 0)
                         break;
-        i--;
-        npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+                sgprv = sg;
+        }
+
+        npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
                   - bus_addr) >> IO_PAGE_SHIFT;
         strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
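
One idiom in the two hunks above deserves a note: a chained list cannot be walked backwards, so the old `i--` step (backing up from the first zero-length entry) is replaced by carrying the previous element in `sgprv`. Condensed from the patch itself (it assumes, as the real code does, that the first entry has a nonzero `dma_length`):

```c
struct scatterlist *sg, *sgprv = NULL;
unsigned long npages, i;

for_each_sg(sglist, sg, nelems, i) {
        if (sg->dma_length == 0)        /* end of the mapped entries */
                break;
        sgprv = sg;                     /* remember the last mapped one */
}

/* sgprv now plays the role the old sglist[i - 1] indexing played. */
npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
          bus_addr) >> IO_PAGE_SHIFT;
```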