Diffstat (limited to 'arch/sparc64/kernel/pci_iommu.c')
 arch/sparc64/kernel/pci_iommu.c | 221
 1 file changed, 118 insertions(+), 103 deletions(-)
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 292983413ae2..2803bc7c2c79 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/delay.h>
 
 #include <asm/pbm.h>
 
@@ -195,6 +196,34 @@ static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long
 	return NULL;
 }
 
+static int iommu_alloc_ctx(struct pci_iommu *iommu)
+{
+	int lowest = iommu->ctx_lowest_free;
+	int sz = IOMMU_NUM_CTXS - lowest;
+	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
+
+	if (unlikely(n == sz)) {
+		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
+		if (unlikely(n == lowest)) {
+			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
+			n = 0;
+		}
+	}
+	if (n)
+		__set_bit(n, iommu->ctx_bitmap);
+
+	return n;
+}
+
+static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
+{
+	if (likely(ctx)) {
+		__clear_bit(ctx, iommu->ctx_bitmap);
+		if (ctx < iommu->ctx_lowest_free)
+			iommu->ctx_lowest_free = ctx;
+	}
+}
+
 /* Allocate and map kernel buffer of size SIZE using consistent mode
  * DMA for PCI device PDEV. Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
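
The two helpers above replace the old free-running iommu_cur_ctx++ counter with a real allocator: contexts come from a bitmap, context 0 is reserved as the "no context" fallback, and ctx_lowest_free is a hint that keeps the search short. The allocation discipline can be sketched in plain C; NUM_CTXS and the inline bit operations below are simplified stand-ins for IOMMU_NUM_CTXS and the kernel's bitmap primitives, sized so the sketch compiles outside the kernel.

#include <stdio.h>

#define NUM_CTXS 64				/* stand-in for IOMMU_NUM_CTXS */

static unsigned long long ctx_bitmap = 1ULL;	/* bit 0 permanently set */
static int ctx_lowest_free = 1;

static int alloc_ctx(void)
{
	int n;

	/* First-fit scan from the lowest slot known to be free... */
	for (n = ctx_lowest_free; n < NUM_CTXS; n++)
		if (!(ctx_bitmap & (1ULL << n)))
			goto found;
	/* ...then wrap around, skipping the reserved context 0. */
	for (n = 1; n < ctx_lowest_free; n++)
		if (!(ctx_bitmap & (1ULL << n)))
			goto found;
	return 0;			/* exhausted: callers share context 0 */
found:
	ctx_bitmap |= 1ULL << n;
	return n;
}

static void free_ctx(int ctx)
{
	if (ctx) {
		ctx_bitmap &= ~(1ULL << ctx);
		if (ctx < ctx_lowest_free)
			ctx_lowest_free = ctx;	/* keep the hint honest */
	}
}

int main(void)
{
	int a = alloc_ctx(), b = alloc_ctx();

	printf("got ctx %d and %d\n", a, b);	/* 1 and 2 */
	free_ctx(a);
	printf("reused: %d\n", alloc_ctx());	/* 1 again */
	return 0;
}

Running out of contexts is not fatal: returning 0 just tags the mapping with the shared fallback context, which stays correct at the cost of coarser flushes.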
@@ -235,7 +264,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 	npages = size >> IO_PAGE_SHIFT;
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	first_page = __pa(first_page);
 	while (npages--) {
 		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
@@ -316,6 +345,8 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
 		}
 	}
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	order = get_order(size);
@@ -359,7 +390,7 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
@@ -379,6 +410,70 @@ bad:
 	return PCI_DMA_ERROR_CODE;
 }
 
+static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
+{
+	int limit;
+
+	if (strbuf->strbuf_ctxflush &&
+	    iommu->iommu_ctxflush) {
+		unsigned long matchreg, flushreg;
+		u64 val;
+
+		flushreg = strbuf->strbuf_ctxflush;
+		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
+
+		pci_iommu_write(flushreg, ctx);
+		val = pci_iommu_read(matchreg);
+		val &= 0xffff;
+		if (!val)
+			goto do_flush_sync;
+
+		while (val) {
+			if (val & 0x1)
+				pci_iommu_write(flushreg, ctx);
+			val >>= 1;
+		}
+		val = pci_iommu_read(matchreg);
+		if (unlikely(val)) {
+			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
+			       "timeout matchreg[%lx] ctx[%lx]\n",
+			       val, ctx);
+			goto do_page_flush;
+		}
+	} else {
+		unsigned long i;
+
+	do_page_flush:
+		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
+			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
+	}
+
+do_flush_sync:
+	/* If the device could not have possibly put dirty data into
+	 * the streaming cache, no flush-flag synchronization needs
+	 * to be performed.
+	 */
+	if (direction == PCI_DMA_TODEVICE)
+		return;
+
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
+	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
+	(void) pci_iommu_read(iommu->write_complete_reg);
+
+	limit = 100000;
+	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
+		limit--;
+		if (!limit)
+			break;
+		udelay(1);
+		membar("#LoadLoad");
+	}
+	if (!limit)
+		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
+		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
+		       vaddr, ctx, npages);
+}
+
 /* Unmap a single streaming mode DMA translation. */
 void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 {
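
The new pci_strbuf_flush() centralizes what used to be three open-coded copies of the flush sequence, and it bounds the two waits that could previously spin forever: the context-match loop becomes a finite walk of the 16-bit match register (with a warning and a per-page fallback on timeout), and the flush-flag poll gives up after 100000 iterations of udelay(1). It also skips flush-flag synchronization entirely for PCI_DMA_TODEVICE, since a device that only reads can never leave dirty data in the streaming cache. The bounded-poll idiom, stripped of the hardware accessors (the stubs below are illustrative stand-ins, not kernel APIs), looks like this:

#include <stdio.h>

/* Illustrative stand-ins for the strbuf flush-flag machinery. */
static volatile int flushflag;
static void hw_post_fsync(void) { flushflag = 1; }	/* hardware sets the flag */
static void udelay(int us) { (void)us; }

static int wait_for_flushflag(void)
{
	int limit = 100000;	/* same cap as the patch: ~100ms worst case */

	while (!flushflag) {
		limit--;
		if (!limit)
			break;	/* warn and carry on rather than hang */
		udelay(1);
	}
	return limit ? 0 : -1;
}

int main(void)
{
	flushflag = 0;
	hw_post_fsync();
	printf("flush %s\n", wait_for_flushflag() ? "timed out" : "completed");
	return 0;
}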
@@ -386,7 +481,7 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
 	iopte_t *base;
-	unsigned long flags, npages, i, ctx;
+	unsigned long flags, npages, ctx;
 
 	if (direction == PCI_DMA_NONE)
 		BUG();
@@ -414,29 +509,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -444,6 +518,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
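
On the unmap side the ordering matters: the streaming buffer is flushed while the IOPTEs are still valid (the context is recovered from the IOPTE itself by the ">> 47UL" extraction above), the TSB entry is then dummied out, the cluster freed, and only at the end is the context returned to the bitmap, all under iommu->lock. A sketch of that context round trip through the IOPTE; the mask below matches the IOPTE_CONTEXT field implied by the 47-bit shift, but treat the exact layout here as illustrative rather than authoritative:

#include <stdio.h>

typedef unsigned long long iopte_t;

#define IOPTE_CONTEXT	0x07ff800000000000ULL	/* bits 58:47, per the >> 47UL above */

static iopte_t encode_ctx(iopte_t pte, unsigned long ctx)
{
	return (pte & ~IOPTE_CONTEXT) | ((iopte_t)ctx << 47);
}

static unsigned long decode_ctx(iopte_t pte)
{
	return (pte & IOPTE_CONTEXT) >> 47;
}

int main(void)
{
	iopte_t pte = encode_ctx(0, 5);	/* map path: stamp ctx into the IOPTE */

	printf("ctx = %lu\n", decode_ctx(pte));	/* unmap path recovers 5 */
	return 0;
}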
@@ -583,7 +659,7 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 	/* Step 4: Choose a context if necessary. */
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 
 	/* Step 5: Create the mappings. */
 	if (strbuf->strbuf_enabled)
@@ -647,29 +723,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
-	if (strbuf->strbuf_enabled) {
-		u32 vaddr = (u32) bus_addr;
-
-		PCI_STC_FLUSHFLAG_INIT(strbuf);
-		if (strbuf->strbuf_ctxflush &&
-		    iommu->iommu_ctxflush) {
-			unsigned long matchreg, flushreg;
-
-			flushreg = strbuf->strbuf_ctxflush;
-			matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-			do {
-				pci_iommu_write(flushreg, ctx);
-			} while(((long)pci_iommu_read(matchreg)) < 0L);
-		} else {
-			for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
-				pci_iommu_write(strbuf->strbuf_pflush, vaddr);
-		}
-
-		pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-		(void) pci_iommu_read(iommu->write_complete_reg);
-		while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-			membar("#LoadLoad");
-	}
+	if (strbuf->strbuf_enabled)
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -677,6 +732,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -715,28 +772,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while(((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i;
-
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -749,7 +785,8 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	struct pcidev_cookie *pcp;
 	struct pci_iommu *iommu;
 	struct pci_strbuf *strbuf;
-	unsigned long flags, ctx;
+	unsigned long flags, ctx, npages, i;
+	u32 bus_addr;
 
 	pcp = pdev->sysdata;
 	iommu = pcp->pbm->iommu;
@@ -772,36 +809,14 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
-	if (iommu->iommu_ctxflush &&
-	    strbuf->strbuf_ctxflush) {
-		unsigned long matchreg, flushreg;
-
-		flushreg = strbuf->strbuf_ctxflush;
-		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
-		do {
-			pci_iommu_write(flushreg, ctx);
-		} while (((long)pci_iommu_read(matchreg)) < 0L);
-	} else {
-		unsigned long i, npages;
-		u32 bus_addr;
-
-		bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-
-		for(i = 1; i < nelems; i++)
-			if (!sglist[i].dma_length)
-				break;
-		i--;
-		npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
-		for (i = 0; i < npages; i++, bus_addr += IO_PAGE_SIZE)
-			pci_iommu_write(strbuf->strbuf_pflush, bus_addr);
-	}
-
-	/* Step 3: Perform flush synchronization sequence. */
-	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
-	(void) pci_iommu_read(iommu->write_complete_reg);
-	while (!PCI_STC_FLUSHFLAG_SET(strbuf))
-		membar("#LoadLoad");
+	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
+	for(i = 1; i < nelems; i++)
+		if (!sglist[i].dma_length)
+			break;
+	i--;
+	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+		  - bus_addr) >> IO_PAGE_SHIFT;
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
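
With the flush logic hoisted into pci_strbuf_flush(), the scatterlist sync path now sizes the flush unconditionally: it takes the first entry's page-aligned bus address, walks forward until the zero-dma_length terminator, and counts IOMMU pages up to the end of the last real segment. A compilable sketch of that computation, with the sparc64 8K IOMMU page constants written out and a simplified stand-in for struct scatterlist:

#include <stdio.h>

#define IO_PAGE_SHIFT	13UL		/* sparc64 IOMMU pages are 8K */
#define IO_PAGE_SIZE	(1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK	(~(IO_PAGE_SIZE - 1UL))
#define IO_PAGE_ALIGN(a) (((a) + IO_PAGE_SIZE - 1UL) & IO_PAGE_MASK)

struct sg { unsigned long dma_address, dma_length; };	/* simplified */

static unsigned long flush_npages(const struct sg *sglist, int nelems)
{
	unsigned long bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	int i;

	for (i = 1; i < nelems; i++)	/* stop at the zero-length terminator */
		if (!sglist[i].dma_length)
			break;
	i--;				/* back up to the last real entry */
	return (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
		- bus_addr) >> IO_PAGE_SHIFT;
}

int main(void)
{
	struct sg sg[3] = { { 0x10000, 0x3000 }, { 0x13000, 0x100 }, { 0, 0 } };

	printf("npages = %lu\n", flush_npages(sg, 3));	/* prints 2 */
	return 0;
}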