Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c  | 90
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c |  2
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c  |  2
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c |  2
-rw-r--r--  arch/sparc64/kernel/sbus.c       | 20
5 files changed, 87 insertions(+), 29 deletions(-)
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 33ca56c90da2..1807876f8c36 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -196,6 +196,34 @@ static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
 	return NULL;
 }
 
+static int iommu_alloc_ctx(struct pci_iommu *iommu)
+{
+	int lowest = iommu->ctx_lowest_free;
+	int sz = IOMMU_NUM_CTXS - lowest;
+	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
+
+	if (unlikely(n == sz)) {
+		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
+		if (unlikely(n == lowest)) {
+			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
+			n = 0;
+		}
+	}
+	if (n)
+		__set_bit(n, iommu->ctx_bitmap);
+
+	return n;
+}
+
+static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
+{
+	if (likely(ctx)) {
+		__clear_bit(ctx, iommu->ctx_bitmap);
+		if (ctx < iommu->ctx_lowest_free)
+			iommu->ctx_lowest_free = ctx;
+	}
+}
+
 /* Allocate and map kernel buffer of size SIZE using consistent mode
  * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
  * successful and set *DMA_ADDRP to the PCI side dma address.
@@ -236,7 +264,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
 	npages = size >> IO_PAGE_SHIFT;
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	first_page = __pa(first_page);
 	while (npages--) {
 		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
@@ -317,6 +345,8 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
 		}
 	}
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	order = get_order(size);
@@ -360,7 +390,7 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
@@ -380,39 +410,55 @@ bad:
 	return PCI_DMA_ERROR_CODE;
 }
 
-static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages)
+static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
 {
 	int limit;
 
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
 	if (strbuf->strbuf_ctxflush &&
 	    iommu->iommu_ctxflush) {
 		unsigned long matchreg, flushreg;
+		u64 val;
 
 		flushreg = strbuf->strbuf_ctxflush;
 		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
 
-		limit = 100000;
+		if (pci_iommu_read(matchreg) == 0)
+			goto do_flush_sync;
+
 		pci_iommu_write(flushreg, ctx);
-		for(;;) {
-			if (((long)pci_iommu_read(matchreg)) >= 0L)
-				break;
-			limit--;
-			if (!limit)
-				break;
-			udelay(1);
+		if ((val = pci_iommu_read(matchreg)) == 0)
+			goto do_flush_sync;
+
+		val &= 0xffff;
+		while (val) {
+			if (val & 0x1)
+				pci_iommu_write(flushreg, ctx);
+			val >>= 1;
 		}
-		if (!limit)
+		val = pci_iommu_read(matchreg);
+		if (unlikely(val)) {
 			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
-			       "timeout vaddr[%08x] ctx[%lx]\n",
-			       vaddr, ctx);
+			       "timeout matchreg[%lx] ctx[%lx]\n",
+			       val, ctx);
+			goto do_page_flush;
+		}
 	} else {
 		unsigned long i;
 
+do_page_flush:
 		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
 			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
 	}
 
+do_flush_sync:
+	/* If the device could not have possibly put dirty data into
+	 * the streaming cache, no flush-flag synchronization needs
+	 * to be performed.
+	 */
+	if (direction == PCI_DMA_TODEVICE)
+		return;
+
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
 	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
 	(void) pci_iommu_read(iommu->write_complete_reg);
 
@@ -466,7 +512,7 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
 	if (strbuf->strbuf_enabled)
-		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -474,6 +520,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
 			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -613,7 +661,7 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 	/* Step 4: Choose a context if necessary. */
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 
 	/* Step 5: Create the mappings. */
 	if (strbuf->strbuf_enabled)
@@ -678,7 +726,7 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
 	if (strbuf->strbuf_enabled)
-		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -686,6 +734,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
 			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -724,7 +774,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -768,7 +818,7 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
 	i--;
 	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
 		  - bus_addr) >> IO_PAGE_SHIFT;
-	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 3567fa879e1f..534320ef0db2 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1212,7 +1212,7 @@ static void __init psycho_iommu_init(struct pci_controller_info *p)
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses. */
 	iommu->iommu_control = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 5525d1ec4af8..53d333b4a4e8 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1265,7 +1265,7 @@ static void __init sabre_iommu_init(struct pci_controller_info *p,
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses. */
 	iommu->iommu_control = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index e93fcadc3722..5753175b94e6 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -1753,7 +1753,7 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses, SCHIZO has iommu ctx flushing. */
 	iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 76ea6455433f..89f5e019f24c 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -117,17 +117,25 @@ static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
 
 #define STRBUF_TAG_VALID 0x02UL
 
-static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
 {
 	unsigned long n;
 	int limit;
 
-	iommu->strbuf_flushflag = 0UL;
 	n = npages;
 	while (n--)
 		upa_writeq(base + (n << IO_PAGE_SHIFT),
 			   iommu->strbuf_regs + STRBUF_PFLUSH);
 
+	/* If the device could not have possibly put dirty data into
+	 * the streaming cache, no flush-flag synchronization needs
+	 * to be performed.
+	 */
+	if (direction == SBUS_DMA_TODEVICE)
+		return;
+
+	iommu->strbuf_flushflag = 0UL;
+
 	/* Whoopee cushion! */
 	upa_writeq(__pa(&iommu->strbuf_flushflag),
 		   iommu->strbuf_regs + STRBUF_FSYNC);
@@ -421,7 +429,7 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size, int direction)
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
-	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -584,7 +592,7 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
 	iommu = sdev->bus->iommu;
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
-	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -596,7 +604,7 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t size, int direction)
 	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -620,7 +628,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int direction)
 	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
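
A note on the new allocator, for readers skimming the diff: iommu_alloc_ctx() replaces the old monotonically increasing iommu_cur_ctx counter with a lowest-free bitmap search, so DMA contexts get recycled instead of silently wrapping and colliding. Below is a minimal, self-contained user-space sketch of the same policy, under stated assumptions: it is plain C, not kernel code; NUM_CTXS, ctx_alloc() and ctx_free() are illustrative stand-ins rather than names from the patch; and the index bookkeeping is simplified relative to the kernel's find_next_zero_bit() offset arithmetic.

	#include <stdint.h>
	#include <stdio.h>

	#define NUM_CTXS 4096			/* stand-in for IOMMU_NUM_CTXS */

	static uint64_t ctx_bitmap[NUM_CTXS / 64];	/* zero-initialized: all free */
	static int ctx_lowest_free = 1;			/* ctx 0 is reserved */

	/* Scan [from, to) for a clear bit; return 'to' on failure, mirroring
	 * the find_next_zero_bit() "returns size when nothing found" contract.
	 */
	static int next_zero_bit(int from, int to)
	{
		for (int n = from; n < to; n++)
			if (!((ctx_bitmap[n / 64] >> (n % 64)) & 1))
				return n;
		return to;
	}

	static int ctx_alloc(void)
	{
		int n = next_zero_bit(ctx_lowest_free, NUM_CTXS);

		if (n == NUM_CTXS) {
			/* Wrap around, skipping reserved ctx 0. */
			n = next_zero_bit(1, ctx_lowest_free);
			if (n == ctx_lowest_free)
				n = 0;	/* exhausted: fall back to shared ctx 0 */
		}
		if (n)
			ctx_bitmap[n / 64] |= (uint64_t)1 << (n % 64);
		return n;
	}

	static void ctx_free(int ctx)
	{
		if (ctx) {
			ctx_bitmap[ctx / 64] &= ~((uint64_t)1 << (ctx % 64));
			if (ctx < ctx_lowest_free)
				ctx_lowest_free = ctx;	/* hint only; alloc rescans */
		}
	}

	int main(void)
	{
		int a = ctx_alloc(), b = ctx_alloc();

		ctx_free(a);
		/* Prints "1 2 1": the freed lowest ctx is the next one handed out. */
		printf("%d %d %d\n", a, b, ctx_alloc());
		return 0;
	}

The fallback to context 0 on exhaustion trades precision for safety: mappings sharing context 0 remain correct, they just flush together, which is also why iommu_free_ctx() above never clears bit 0.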