Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c  165
-rw-r--r--  arch/sparc64/kernel/sbus.c        31
2 files changed, 88 insertions(+), 108 deletions(-)
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 292983413ae2..f009b1b45501 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/delay.h>
 
 #include <asm/pbm.h>
 
@@ -379,6 +380,54 @@ bad:
 	return PCI_DMA_ERROR_CODE;
 }
 
+static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages)
+{
+	int limit;
+
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
+	if (strbuf->strbuf_ctxflush &&
+	    iommu->iommu_ctxflush) {
+		unsigned long matchreg, flushreg;
+
+		flushreg = strbuf->strbuf_ctxflush;
+		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+
+		limit = 10000;
+		do {
+			pci_iommu_write(flushreg, ctx);
+			udelay(10);
+			limit--;
+			if (!limit)
+				break;
+		} while(((long)pci_iommu_read(matchreg)) < 0L);
+		if (!limit)
+			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
+			       "timeout vaddr[%08x] ctx[%lx]\n",
+			       vaddr, ctx);
+	} else {
+		unsigned long i;
+
+		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
+			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
+	}
+
+	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+	(void) pci_iommu_read(iommu->write_complete_reg);
+
+	limit = 10000;
+	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(10);
+		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
+		       vaddr, ctx, npages);
+}
+
 /* Unmap a single streaming mode DMA translation. */
 void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
@@ -386,7 +435,7 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
 	iopte_t *base;
-	unsigned long flags, npages, i, ctx;
+	unsigned long flags, npages, ctx;
 
 	if (direction == PCI_DMA_NONE)
 		BUG();
@@ -414,29 +463,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -647,29 +675,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = (u32) bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -715,28 +722,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while(((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i;
-
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -749,7 +735,8 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
-	unsigned long flags, ctx;
+	unsigned long flags, ctx, npages, i;
+	u32 bus_addr;
 
 	pcp = pdev->sysdata;
 	iommu = pcp->pbm->iommu;
@@ -772,36 +759,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while (((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i, npages;
-		u32 bus_addr;
-
-		bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-
-		for(i = 1; i < nelems; i++)
-			if (!sglist[i].dma_length)
-				break;
-		i--;
-		npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
+	for(i = 1; i < nelems; i++)
+		if (!sglist[i].dma_length)
+			break;
+	i--;
+	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+		  - bus_addr) >> IO_PAGE_SHIFT;
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
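
Both helpers this patch introduces share the same bounded-poll shape: trigger the flush, then spin on a completion word the hardware DMA-writes back, giving up after 10000 iterations of udelay(10) (roughly 100ms) instead of looping forever against wedged hardware. A minimal standalone sketch of that pattern follows; poll_flushflag() is a hypothetical distillation for illustration only, with *flag standing in for strbuf->strbuf_flushflag / PCI_STC_FLUSHFLAG_SET():

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>

/* Sketch only: a hypothetical distillation of the timeout loops added
 * above, not a function in this patch.  Poll a DMA-written completion
 * word, but bail out after 10000 * 10us (~100ms) if the hardware
 * never responds; the callers in this patch printk() a KERN_WARNING
 * and press on rather than hang the CPU.
 */
static int poll_flushflag(volatile unsigned long *flag)
{
	int limit = 10000;

	while (*flag == 0UL) {
		if (!--limit)
			return -ETIMEDOUT;
		udelay(10);
		membar("#LoadLoad");	/* sparc64: order re-reads of *flag */
	}
	return 0;
}

A caller would treat -ETIMEDOUT as a warning condition, exactly as the helpers above do via their printk(KERN_WARNING ...) paths: a missed flush is worth reporting, but not worth a hard hang.
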
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 14d9c3a21b9a..d3eca98e1fe7 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -117,19 +117,34 @@ static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages
 
 #define STRBUF_TAG_VALID 0x02UL
 
-static void strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
 {
+	unsigned long n;
+	int limit;
+
 	iommu->strbuf_flushflag = 0UL;
-	while (npages--)
-		upa_writeq(base + (npages << IO_PAGE_SHIFT),
+	n = npages;
+	while (n--)
+		upa_writeq(base + (n << IO_PAGE_SHIFT),
 			   iommu->strbuf_regs + STRBUF_PFLUSH);
 
 	/* Whoopee cushion! */
 	upa_writeq(__pa(&iommu->strbuf_flushflag),
 		   iommu->strbuf_regs + STRBUF_FSYNC);
 	upa_readq(iommu->sbus_control_reg);
-	while (iommu->strbuf_flushflag == 0UL)
+
+	limit = 10000;
+	while (iommu->strbuf_flushflag == 0UL) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(10);
 		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "sbus_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] npages[%ld]\n",
+		       base, npages);
 }
 
 static iopte_t *alloc_streaming_cluster(struct sbus_iommu *iommu, unsigned long npages)
@@ -406,7 +421,7 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -569,7 +584,7 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int
 	iommu = sdev->bus->iommu;
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
-	strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -581,7 +596,7 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t
 	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -605,7 +620,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int
 	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
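
The sbus.c hunk also fixes a small bookkeeping detail along the way: the old loop consumed its argument directly (while (npages--)), which would have left nothing meaningful for the new timeout warning to report. Flushing through a working copy keeps the original count intact for the printk(). A condensed restatement of that loop follows; flush_strbuf_tags() is a hypothetical name used only for this sketch:

/* Sketch only: a hypothetical extraction of the page-flush loop in
 * sbus_strbuf_flush() above.  Counting down the working copy `n`
 * leaves `npages` untouched for the timeout warning that follows it
 * in the real function.
 */
static void flush_strbuf_tags(struct sbus_iommu *iommu, u32 base,
			      unsigned long npages)
{
	unsigned long n = npages;

	while (n--)
		upa_writeq(base + (n << IO_PAGE_SHIFT),
			   iommu->strbuf_regs + STRBUF_PFLUSH);
}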