author     David S. Miller <davem@davemloft.net>  2005-05-31 19:57:59 -0400
committer  David S. Miller <davem@davemloft.net>  2005-05-31 19:57:59 -0400
commit     7c963ad1d113790a8c723a178988b675868f3abe (patch)
tree       2e1cc54795aeca06a11801636737901ba71a2ed8 /arch/sparc64/kernel/sbus.c
parent     2e3e80c2b75e3815a0160cbd23d4fdb767d66b35 (diff)
[SPARC64]: Fix streaming buffer flushing on PCI and SBUS.
First, if the direction is TODEVICE, dirty data in the
streaming cache is impossible, so we can elide the flush-flag
synchronization in that case.
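In code, this amounts to an early return once the per-page flush
commands have been queued. Here is a condensed sketch of the resulting
control flow; the full hunk appears in the diff below, and the trailing
poll loop is elided here:

static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base,
			      unsigned long npages, int direction)
{
	/* Queue a flush command for every IO page in the range. */
	while (npages--)
		upa_writeq(base + (npages << IO_PAGE_SHIFT),
			   iommu->strbuf_regs + STRBUF_PFLUSH);

	/* A TODEVICE mapping is only read by the device, so the
	 * streaming cache cannot hold dirty data and the expensive
	 * flush-flag handshake can be skipped.
	 */
	if (direction == SBUS_DMA_TODEVICE)
		return;

	/* Otherwise arm the flush flag and let the hardware set it
	 * once all flushes have completed (poll loop not shown).
	 */
	iommu->strbuf_flushflag = 0UL;
	upa_writeq(__pa(&iommu->strbuf_flushflag),
		   iommu->strbuf_regs + STRBUF_FSYNC);
}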
Next, the context allocator is broken: it is highly likely
that contexts get used multiple times for different DMA
mappings, which confuses the strbuf flushing code and makes
it run inefficiently.
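A hypothetical sketch of the allocator side of the fix follows; the
context-allocator change itself lands in the PCI IOMMU code and is
outside this sbus.c diffstat. The idea is to track live contexts in a
bitmap so a context is never handed out twice while a mapping still
owns it, instead of cycling a bare counter. NUM_CTX, the struct layout,
and the function names here are illustrative, not the kernel's; callers
are assumed to hold the IOMMU lock, hence the non-atomic bit ops.

#include <linux/bitops.h>

#define NUM_CTX		4096

struct ctx_allocator {
	unsigned long	bitmap[NUM_CTX / BITS_PER_LONG];
	unsigned long	lowest_free;	/* search hint */
};

static unsigned long ctx_alloc(struct ctx_allocator *c)
{
	unsigned long ctx;

	/* Search from the hint, wrapping once to the start. */
	ctx = find_next_zero_bit(c->bitmap, NUM_CTX, c->lowest_free);
	if (ctx >= NUM_CTX)
		ctx = find_next_zero_bit(c->bitmap, NUM_CTX, 0);
	if (ctx < NUM_CTX) {
		__set_bit(ctx, c->bitmap);	/* context now exclusively owned */
		c->lowest_free = ctx + 1;
	}
	return ctx;	/* NUM_CTX means "none free" */
}

static void ctx_free(struct ctx_allocator *c, unsigned long ctx)
{
	__clear_bit(ctx, c->bitmap);		/* release for reuse */
	if (ctx < c->lowest_free)
		c->lowest_free = ctx;
}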
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel/sbus.c')
-rw-r--r--  arch/sparc64/kernel/sbus.c | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 76ea6455433f..89f5e019f24c 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -117,17 +117,25 @@ static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages
 
 #define STRBUF_TAG_VALID	0x02UL
 
-static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
 {
 	unsigned long n;
 	int limit;
 
-	iommu->strbuf_flushflag = 0UL;
 	n = npages;
 	while (n--)
 		upa_writeq(base + (n << IO_PAGE_SHIFT),
 			   iommu->strbuf_regs + STRBUF_PFLUSH);
 
+	/* If the device could not have possibly put dirty data into
+	 * the streaming cache, no flush-flag synchronization needs
+	 * to be performed.
+	 */
+	if (direction == SBUS_DMA_TODEVICE)
+		return;
+
+	iommu->strbuf_flushflag = 0UL;
+
 	/* Whoopee cushion! */
 	upa_writeq(__pa(&iommu->strbuf_flushflag),
 		   iommu->strbuf_regs + STRBUF_FSYNC);
@@ -421,7 +429,7 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
-	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -584,7 +592,7 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int
 	iommu = sdev->bus->iommu;
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
-	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -596,7 +604,7 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t
 	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -620,7 +628,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int
 	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
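For context, a short example of how a driver would exercise these
paths. sbus_map_single/sbus_unmap_single and the SBUS_DMA_* constants
are assumed from the 2.6-era asm/sbus.h API; buf and len are
placeholders:

	/* Map a buffer the device will only read.  On unmap, the
	 * patched sbus_strbuf_flush() sees SBUS_DMA_TODEVICE and
	 * skips the flush-flag wait; only the per-page flush
	 * commands are issued.
	 */
	dma_addr_t dvma = sbus_map_single(sdev, buf, len, SBUS_DMA_TODEVICE);

	/* ... start the device, let it read from the buffer ... */

	sbus_unmap_single(sdev, dvma, len, SBUS_DMA_TODEVICE);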