author     David S. Miller <davem@huronp11.davemloft.net>   2008-02-06 06:50:26 -0500
committer  David S. Miller <davem@davemloft.net>            2008-02-06 07:12:25 -0500
commit     38192d52f159bc06b7f523800c10b583cdd661d5 (patch)
tree       4cf695d583c0a657133642c0299cbfa536e25663 /arch/sparc64/kernel/iommu.c
parent     b3ff81dd8ae29ec431f6cc91aff601a51ef6fb8c (diff)
[SPARC64]: Temporarily remove IOMMU merging code.
Changeset fde6a3c82d67f592eb587be4d12222b0ae6d4321 ("iommu sg merging: sparc64: make
iommu respect the segment size limits") broke sparc64 because, whilst it added the
segment limiting code to the first pass of SG mapping (in prepare_sg()), it did not
add matching code to the second pass handling (in fill_sg()).

As a result the two passes disagree where the segment boundaries should be, resulting
in OOPSes, DMA corruption, and corrupted superblocks.

Signed-off-by: David S. Miller <davem@davemloft.net>
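For context, the replacement dma_4u_map_sg() below sizes its IOMMU allocation with a
single per-entry page count (the calc_npages()/iommu_num_pages() calls visible in the
hunks) instead of the two-pass prepare_sg()/fill_sg() bookkeeping it removes. The
following standalone sketch only illustrates that page-count arithmetic; the stub
types, the *_stub helper names, and the 8K IO page size (IO_PAGE_SHIFT = 13) are
assumptions for illustration, not code from the patch.

#include <stdio.h>

#define IO_PAGE_SHIFT   13UL
#define IO_PAGE_SIZE    (1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK    (~(IO_PAGE_SIZE - 1UL))
#define IO_PAGE_ALIGN(x)        (((x) + IO_PAGE_SIZE - 1UL) & IO_PAGE_MASK)

/* Hypothetical stand-in for one scatterlist entry: a physical address plus a length. */
struct sg_stub {
        unsigned long paddr;
        unsigned long len;
};

/* IO pages spanned by one buffer, counting partially used head and tail pages. */
static unsigned long iommu_num_pages_stub(unsigned long paddr, unsigned long len)
{
        return (IO_PAGE_ALIGN(paddr + len) - (paddr & IO_PAGE_MASK)) >> IO_PAGE_SHIFT;
}

/* Sum over the whole list: the one number the rewritten dma_4u_map_sg() needs up
 * front, in place of the removed prepare_sg()/fill_sg() two-pass bookkeeping. */
static unsigned long calc_npages_stub(const struct sg_stub *sg, int nelems)
{
        unsigned long npages = 0;
        int i;

        for (i = 0; i < nelems; i++)
                npages += iommu_num_pages_stub(sg[i].paddr, sg[i].len);
        return npages;
}

int main(void)
{
        /* Two buffers: one crossing an 8K IO page boundary, one inside a single page. */
        struct sg_stub sg[] = {
                { .paddr = 0x10001f00UL, .len = 0x300UL },      /* spans 2 IO pages */
                { .paddr = 0x20004000UL, .len = 0x100UL },      /* spans 1 IO page  */
        };

        printf("total IO pages: %lu\n", calc_npages_stub(sg, 2));
        return 0;
}

Compiled with a plain cc, the sketch prints a total of 3 IO pages: the first buffer
crosses one 8K boundary, the second fits entirely within a single page.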
Diffstat (limited to 'arch/sparc64/kernel/iommu.c')
-rw-r--r--  arch/sparc64/kernel/iommu.c  142
1 files changed, 26 insertions, 116 deletions
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index 4b9115a4d92e..5623a4d59dff 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -472,94 +472,15 @@ static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
         spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
-#define SG_ENT_PHYS_ADDRESS(SG)        (__pa(sg_virt((SG))))
-
-static void fill_sg(iopte_t *iopte, struct scatterlist *sg,
-                    int nused, int nelems,
-                    unsigned long iopte_protection)
-{
-        struct scatterlist *dma_sg = sg;
-        int i;
-
-        for (i = 0; i < nused; i++) {
-                unsigned long pteval = ~0UL;
-                u32 dma_npages;
-
-                dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
-                              dma_sg->dma_length +
-                              ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
-                do {
-                        unsigned long offset;
-                        signed int len;
-
-                        /* If we are here, we know we have at least one
-                         * more page to map. So walk forward until we
-                         * hit a page crossing, and begin creating new
-                         * mappings from that spot.
-                         */
-                        for (;;) {
-                                unsigned long tmp;
-
-                                tmp = SG_ENT_PHYS_ADDRESS(sg);
-                                len = sg->length;
-                                if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
-                                        pteval = tmp & IO_PAGE_MASK;
-                                        offset = tmp & (IO_PAGE_SIZE - 1UL);
-                                        break;
-                                }
-                                if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
-                                        pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
-                                        offset = 0UL;
-                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
-                                        break;
-                                }
-                                sg = sg_next(sg);
-                                nelems--;
-                        }
-
-                        pteval = iopte_protection | (pteval & IOPTE_PAGE);
-                        while (len > 0) {
-                                *iopte++ = __iopte(pteval);
-                                pteval += IO_PAGE_SIZE;
-                                len -= (IO_PAGE_SIZE - offset);
-                                offset = 0;
-                                dma_npages--;
-                        }
-
-                        pteval = (pteval & IOPTE_PAGE) + len;
-                        sg = sg_next(sg);
-                        nelems--;
-
-                        /* Skip over any tail mappings we've fully mapped,
-                         * adjusting pteval along the way. Stop when we
-                         * detect a page crossing event.
-                         */
-                        while (nelems &&
-                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
-                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
-                               ((pteval ^
-                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
-                                pteval += sg->length;
-                                sg = sg_next(sg);
-                                nelems--;
-                        }
-                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
-                                pteval = ~0UL;
-                } while (dma_npages != 0);
-                dma_sg = sg_next(dma_sg);
-        }
-}
-
 static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
                          int nelems, enum dma_data_direction direction)
 {
-        struct iommu *iommu;
+        unsigned long flags, ctx, i, npages, iopte_protection;
+        struct scatterlist *sg;
         struct strbuf *strbuf;
-        unsigned long flags, ctx, npages, iopte_protection;
+        struct iommu *iommu;
         iopte_t *base;
         u32 dma_base;
-        struct scatterlist *sgtmp;
-        int used;
 
         /* Fast path single entry scatterlists. */
         if (nelems == 1) {
@@ -578,11 +499,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
         if (unlikely(direction == DMA_NONE))
                 goto bad_no_ctx;
 
-        /* Step 1: Prepare scatter list. */
-
-        npages = prepare_sg(dev, sglist, nelems);
-
-        /* Step 2: Allocate a cluster and context, if necessary. */
+        npages = calc_npages(sglist, nelems);
 
         spin_lock_irqsave(&iommu->lock, flags);
 
@@ -599,18 +516,6 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
         dma_base = iommu->page_table_map_base +
                 ((base - iommu->page_table) << IO_PAGE_SHIFT);
 
-        /* Step 3: Normalize DMA addresses. */
-        used = nelems;
-
-        sgtmp = sglist;
-        while (used && sgtmp->dma_length) {
-                sgtmp->dma_address += dma_base;
-                sgtmp = sg_next(sgtmp);
-                used--;
-        }
-        used = nelems - used;
-
-        /* Step 4: Create the mappings. */
         if (strbuf->strbuf_enabled)
                 iopte_protection = IOPTE_STREAMING(ctx);
         else
@@ -618,13 +523,27 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
         if (direction != DMA_TO_DEVICE)
                 iopte_protection |= IOPTE_WRITE;
 
-        fill_sg(base, sglist, used, nelems, iopte_protection);
+        for_each_sg(sglist, sg, nelems, i) {
+                unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
+                unsigned long slen = sg->length;
+                unsigned long this_npages;
 
-#ifdef VERIFY_SG
-        verify_sglist(sglist, nelems, base, npages);
-#endif
+                this_npages = iommu_num_pages(paddr, slen);
 
-        return used;
+                sg->dma_address = dma_base | (paddr & ~IO_PAGE_MASK);
+                sg->dma_length = slen;
+
+                paddr &= IO_PAGE_MASK;
+                while (this_npages--) {
+                        iopte_val(*base) = iopte_protection | paddr;
+
+                        base++;
+                        paddr += IO_PAGE_SIZE;
+                        dma_base += IO_PAGE_SIZE;
+                }
+        }
+
+        return nelems;
 
 bad:
         iommu_free_ctx(iommu, ctx);
@@ -637,11 +556,10 @@ bad_no_ctx:
 static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
                             int nelems, enum dma_data_direction direction)
 {
-        struct iommu *iommu;
+        unsigned long flags, ctx, i, npages;
         struct strbuf *strbuf;
+        struct iommu *iommu;
         iopte_t *base;
-        unsigned long flags, ctx, i, npages;
-        struct scatterlist *sg, *sgprv;
         u32 bus_addr;
 
         if (unlikely(direction == DMA_NONE)) {
@@ -654,15 +572,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
         bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
-        sgprv = NULL;
-        for_each_sg(sglist, sg, nelems, i) {
-                if (sg->dma_length == 0)
-                        break;
-                sgprv = sg;
-        }
-
-        npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
-                  bus_addr) >> IO_PAGE_SHIFT;
+        npages = calc_npages(sglist, nelems);
 
         base = iommu->page_table +
                 ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);