author     Jens Axboe <jens.axboe@oracle.com>      2007-08-07 03:37:10 -0400
committer  Jens Axboe <jens.axboe@oracle.com>      2007-10-16 05:27:32 -0400
commit     2c941a204070ab32d92d40318a3196a7fb994c00 (patch)
tree       94dc01c168b8330ab0390faeb602728a82e64df6 /arch
parent     0912a5db0ea45d8aef3ee99a882e093285e32c3c (diff)
SPARC64: sg chaining support
This updates the sparc64 iommu/pci dma mappers to sg chaining.

Acked-by: David S. Miller <davem@davemloft.net>
Later updated to newer kernel with unified sparc64 iommu sg handling.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
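For context, the substitution made throughout this patch is from pointer arithmetic (sg++, sglist[i]) to the chain-aware helpers sg_next()/for_each_sg(), since a chained scatterlist is no longer guaranteed to be one contiguous array of entries. A minimal sketch of the new-style walk follows; it is illustrative only and not part of the patch, and the helper name count_mapped_entries is made up for the example:

	#include <linux/scatterlist.h>

	/*
	 * Illustrative only: walk an already-mapped, possibly chained
	 * scatterlist the way the unmap/sync paths below now do.  With
	 * sg chaining, "sg + 1" may point at a chain entry rather than
	 * the next real segment, so sg_next()/for_each_sg() must be
	 * used instead of plain pointer arithmetic.
	 */
	static int count_mapped_entries(struct scatterlist *sglist, int nelems)
	{
		struct scatterlist *sg;
		int i, mapped = 0;

		for_each_sg(sglist, sg, nelems, i) {
			if (sg->dma_length == 0)
				break;
			mapped++;
		}
		return mapped;
	}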
Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc64/kernel/iommu.c      | 39
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c  | 32
2 files changed, 43 insertions, 28 deletions
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index b35a62167e9c..db3ffcf7a120 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -10,6 +10,7 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
+#include <linux/scatterlist.h>
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
@@ -480,7 +481,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
 			   unsigned long iopte_protection)
 {
 	struct scatterlist *dma_sg = sg;
-	struct scatterlist *sg_end = sg + nelems;
+	struct scatterlist *sg_end = sg_last(sg, nelems);
 	int i;
 
 	for (i = 0; i < nused; i++) {
@@ -515,7 +516,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
 					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
 					break;
 				}
-				sg++;
+				sg = sg_next(sg);
 			}
 
 			pteval = iopte_protection | (pteval & IOPTE_PAGE);
@@ -528,24 +529,24 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
 			}
 
 			pteval = (pteval & IOPTE_PAGE) + len;
-			sg++;
+			sg = sg_next(sg);
 
 			/* Skip over any tail mappings we've fully mapped,
 			 * adjusting pteval along the way.  Stop when we
 			 * detect a page crossing event.
 			 */
-			while (sg < sg_end &&
+			while (sg != sg_end &&
 			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
 			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
 			       ((pteval ^
 				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
 				pteval += sg->length;
-				sg++;
+				sg = sg_next(sg);
 			}
 			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
 				pteval = ~0UL;
 		} while (dma_npages != 0);
-		dma_sg++;
+		dma_sg = sg_next(dma_sg);
 	}
 }
551 552
@@ -606,7 +607,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	sgtmp = sglist;
 	while (used && sgtmp->dma_length) {
 		sgtmp->dma_address += dma_base;
-		sgtmp++;
+		sgtmp = sg_next(sgtmp);
 		used--;
 	}
 	used = nelems - used;
@@ -642,6 +643,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct strbuf *strbuf;
 	iopte_t *base;
 	unsigned long flags, ctx, i, npages;
+	struct scatterlist *sg, *sgprv;
 	u32 bus_addr;
 
 	if (unlikely(direction == DMA_NONE)) {
@@ -654,11 +656,14 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
 	bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
-	for (i = 1; i < nelems; i++)
-		if (sglist[i].dma_length == 0)
+	sgprv = NULL;
+	for_each_sg(sglist, sg, nelems, i) {
+		if (sg->dma_length == 0)
 			break;
-	i--;
-	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+		sgprv = sg;
+	}
+
+	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
 		  bus_addr) >> IO_PAGE_SHIFT;
 
 	base = iommu->page_table +
@@ -730,6 +735,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 	struct iommu *iommu;
 	struct strbuf *strbuf;
 	unsigned long flags, ctx, npages, i;
+	struct scatterlist *sg, *sgprv;
 	u32 bus_addr;
 
 	iommu = dev->archdata.iommu;
@@ -753,11 +759,14 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 
 	/* Step 2: Kick data out of streaming buffers. */
 	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-	for(i = 1; i < nelems; i++)
-		if (!sglist[i].dma_length)
+	sgprv = NULL;
+	for_each_sg(sglist, sg, nelems, i) {
+		if (sg->dma_length == 0)
 			break;
-	i--;
-	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+		sgprv = sg;
+	}
+
+	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
 		  - bus_addr) >> IO_PAGE_SHIFT;
 	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 95de1444ee67..cacacfae5451 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -13,6 +13,7 @@
 #include <linux/irq.h>
 #include <linux/msi.h>
 #include <linux/log2.h>
+#include <linux/scatterlist.h>
 
 #include <asm/iommu.h>
 #include <asm/irq.h>
@@ -373,7 +374,7 @@ static inline long fill_sg(long entry, struct device *dev,
 			   int nused, int nelems, unsigned long prot)
 {
 	struct scatterlist *dma_sg = sg;
-	struct scatterlist *sg_end = sg + nelems;
+	struct scatterlist *sg_end = sg_last(sg, nelems);
 	unsigned long flags;
 	int i;
 
@@ -413,7 +414,7 @@ static inline long fill_sg(long entry, struct device *dev,
 					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
 					break;
 				}
-				sg++;
+				sg = sg_next(sg);
 			}
 
 			pteval = (pteval & IOPTE_PAGE);
@@ -431,24 +432,25 @@ static inline long fill_sg(long entry, struct device *dev,
 			}
 
 			pteval = (pteval & IOPTE_PAGE) + len;
-			sg++;
+			sg = sg_next(sg);
 
 			/* Skip over any tail mappings we've fully mapped,
 			 * adjusting pteval along the way.  Stop when we
 			 * detect a page crossing event.
 			 */
-			while (sg < sg_end &&
-			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
+			while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
 			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
 			       ((pteval ^
 				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
 				pteval += sg->length;
-				sg++;
+				if (sg == sg_end)
+					break;
+				sg = sg_next(sg);
 			}
 			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
 				pteval = ~0UL;
 		} while (dma_npages != 0);
-		dma_sg++;
+		dma_sg = sg_next(dma_sg);
 	}
 
 	if (unlikely(iommu_batch_end() < 0L))
@@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	sgtmp = sglist;
 	while (used && sgtmp->dma_length) {
 		sgtmp->dma_address += dma_base;
-		sgtmp++;
+		sgtmp = sg_next(sgtmp);
 		used--;
 	}
 	used = nelems - used;
@@ -545,6 +547,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
 	unsigned long flags, i, npages;
+	struct scatterlist *sg, *sgprv;
 	long entry;
 	u32 devhandle, bus_addr;
 
@@ -558,12 +561,15 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 	devhandle = pbm->devhandle;
 
 	bus_addr = sglist->dma_address & IO_PAGE_MASK;
-
-	for (i = 1; i < nelems; i++)
-		if (sglist[i].dma_length == 0)
+	sgprv = NULL;
+	for_each_sg(sglist, sg, nelems, i) {
+		if (sg->dma_length == 0)
 			break;
-	i--;
-	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+
+		sgprv = sg;
+	}
+
+	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
 		  bus_addr) >> IO_PAGE_SHIFT;
 
 	entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);