path: root/arch/sparc64/kernel/pci_iommu.c
author	David S. Miller <davem@davemloft.net>	2005-05-31 19:57:59 -0400
committer	David S. Miller <davem@davemloft.net>	2005-05-31 19:57:59 -0400
commit	7c963ad1d113790a8c723a178988b675868f3abe (patch)
tree	2e1cc54795aeca06a11801636737901ba71a2ed8 /arch/sparc64/kernel/pci_iommu.c
parent	2e3e80c2b75e3815a0160cbd23d4fdb767d66b35 (diff)
[SPARC64]: Fix streaming buffer flushing on PCI and SBUS.
Firstly, if the direction is TODEVICE, then dirty data in the streaming cache is impossible, so we can elide the flush-flag synchronization in that case.

Next, the context allocator is broken.  It is highly likely that contexts get used multiple times for different dma mappings, which confuses the strbuf flushing code and makes it run inefficiently.

Signed-off-by: David S. Miller <davem@davemloft.net>
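The context-allocator fix replaces the blind iommu_cur_ctx++ counter with a bitmap search that always hands out the lowest free context and reserves context 0 as the "no context" fallback, so a context is never given to two live mappings at once. A rough, stand-alone illustration of that allocation strategy (a hedged user-space sketch with hypothetical ctx_pool names, not the kernel code in the diff below):

#include <stdio.h>

#define NUM_CTXS 16	/* stand-in for IOMMU_NUM_CTXS */

/* Hypothetical model of the allocator: one byte per context instead of a
 * real bitmap; context 0 is reserved to mean "no context available". */
struct ctx_pool {
	unsigned char used[NUM_CTXS];
	int lowest_free;	/* search hint, only lowered on free */
};

static int ctx_alloc(struct ctx_pool *p)
{
	int n;

	/* Search upward from the hint first... */
	for (n = p->lowest_free; n < NUM_CTXS; n++)
		if (!p->used[n])
			goto found;
	/* ...then wrap around and retry below the hint, skipping context 0. */
	for (n = 1; n < p->lowest_free; n++)
		if (!p->used[n])
			goto found;
	return 0;		/* out of contexts */
found:
	p->used[n] = 1;
	return n;
}

static void ctx_free(struct ctx_pool *p, int ctx)
{
	if (ctx) {
		p->used[ctx] = 0;
		if (ctx < p->lowest_free)
			p->lowest_free = ctx;
	}
}

int main(void)
{
	struct ctx_pool pool = { .lowest_free = 1 };
	int a = ctx_alloc(&pool);
	int b = ctx_alloc(&pool);

	ctx_free(&pool, a);
	/* Prints "1 2 1": a freed context is reused before higher numbers. */
	printf("%d %d %d\n", a, b, ctx_alloc(&pool));
	return 0;
}

Because the hint is only ever lowered (on free), a just-released context is reclaimed before the search walks further up the bitmap, keeping context numbers dense; a context is marked busy for the whole lifetime of its mapping, which is what prevents the concurrent reuse that confused the strbuf flushing code.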
Diffstat (limited to 'arch/sparc64/kernel/pci_iommu.c')
-rw-r--r--	arch/sparc64/kernel/pci_iommu.c	90
1 files changed, 70 insertions(+), 20 deletions(-)
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 33ca56c90da2..1807876f8c36 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -196,6 +196,34 @@ static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long
 	return NULL;
 }
 
+static int iommu_alloc_ctx(struct pci_iommu *iommu)
+{
+	int lowest = iommu->ctx_lowest_free;
+	int sz = IOMMU_NUM_CTXS - lowest;
+	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
+
+	if (unlikely(n == sz)) {
+		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
+		if (unlikely(n == lowest)) {
+			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
+			n = 0;
+		}
+	}
+	if (n)
+		__set_bit(n, iommu->ctx_bitmap);
+
+	return n;
+}
+
+static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
+{
+	if (likely(ctx)) {
+		__clear_bit(ctx, iommu->ctx_bitmap);
+		if (ctx < iommu->ctx_lowest_free)
+			iommu->ctx_lowest_free = ctx;
+	}
+}
+
 /* Allocate and map kernel buffer of size SIZE using consistent mode
  * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
  * successful and set *DMA_ADDRP to the PCI side dma address.
@@ -236,7 +264,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 	npages = size >> IO_PAGE_SHIFT;
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	first_page = __pa(first_page);
 	while (npages--) {
 		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
@@ -317,6 +345,8 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
 		}
 	}
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	order = get_order(size);
@@ -360,7 +390,7 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
@@ -380,39 +410,55 @@ bad:
 	return PCI_DMA_ERROR_CODE;
 }
 
-static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages)
+static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
 {
 	int limit;
 
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
 	if (strbuf->strbuf_ctxflush &&
 	    iommu->iommu_ctxflush) {
 		unsigned long matchreg, flushreg;
+		u64 val;
 
 		flushreg = strbuf->strbuf_ctxflush;
 		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
 
-		limit = 100000;
+		if (pci_iommu_read(matchreg) == 0)
+			goto do_flush_sync;
+
 		pci_iommu_write(flushreg, ctx);
-		for(;;) {
-			if (((long)pci_iommu_read(matchreg)) >= 0L)
-				break;
-			limit--;
-			if (!limit)
-				break;
-			udelay(1);
+		if ((val = pci_iommu_read(matchreg)) == 0)
+			goto do_flush_sync;
+
+		val &= 0xffff;
+		while (val) {
+			if (val & 0x1)
+				pci_iommu_write(flushreg, ctx);
+			val >>= 1;
 		}
-		if (!limit)
+		val = pci_iommu_read(matchreg);
+		if (unlikely(val)) {
 			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
-			       "timeout vaddr[%08x] ctx[%lx]\n",
-			       vaddr, ctx);
+			       "timeout matchreg[%lx] ctx[%lx]\n",
+			       val, ctx);
+			goto do_page_flush;
+		}
 	} else {
 		unsigned long i;
 
+	do_page_flush:
 		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
 			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
 	}
 
+do_flush_sync:
+	/* If the device could not have possibly put dirty data into
+	 * the streaming cache, no flush-flag synchronization needs
+	 * to be performed.
+	 */
+	if (direction == PCI_DMA_TODEVICE)
+		return;
+
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
 	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
 	(void) pci_iommu_read(iommu->write_complete_reg);
 
@@ -466,7 +512,7 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
 	if (strbuf->strbuf_enabled)
-		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -474,6 +520,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
 			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -613,7 +661,7 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 	/* Step 4: Choose a context if necessary. */
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 
 	/* Step 5: Create the mappings. */
 	if (strbuf->strbuf_enabled)
@@ -678,7 +726,7 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
 	if (strbuf->strbuf_enabled)
-		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -686,6 +734,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
 			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -724,7 +774,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -768,7 +818,7 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	i--;
 	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
 		  - bus_addr) >> IO_PAGE_SHIFT;
-	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }