about summary refs log tree commit diff stats
path: root/arch/powerpc/kernel/vio.c
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc/kernel/vio.c')
-rw-r--r--arch/powerpc/kernel/vio.c31
1 file changed, 22 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 76a64821f4a2..826d8bd9e522 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -518,16 +518,18 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
518 struct dma_attrs *attrs) 518 struct dma_attrs *attrs)
519{ 519{
520 struct vio_dev *viodev = to_vio_dev(dev); 520 struct vio_dev *viodev = to_vio_dev(dev);
521 struct iommu_table *tbl;
521 dma_addr_t ret = DMA_ERROR_CODE; 522 dma_addr_t ret = DMA_ERROR_CODE;
522 523
523 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) { 524 tbl = get_iommu_table_base(dev);
525 if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
524 atomic_inc(&viodev->cmo.allocs_failed); 526 atomic_inc(&viodev->cmo.allocs_failed);
525 return ret; 527 return ret;
526 } 528 }
527 529
528 ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs); 530 ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
529 if (unlikely(dma_mapping_error(dev, ret))) { 531 if (unlikely(dma_mapping_error(dev, ret))) {
530 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); 532 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
531 atomic_inc(&viodev->cmo.allocs_failed); 533 atomic_inc(&viodev->cmo.allocs_failed);
532 } 534 }
533 535
@@ -540,10 +542,12 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
540 struct dma_attrs *attrs) 542 struct dma_attrs *attrs)
541{ 543{
542 struct vio_dev *viodev = to_vio_dev(dev); 544 struct vio_dev *viodev = to_vio_dev(dev);
545 struct iommu_table *tbl;
543 546
547 tbl = get_iommu_table_base(dev);
544 dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs); 548 dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
545 549
546 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE)); 550 vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
547} 551}
548 552
549static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist, 553static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -551,12 +555,14 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
551 struct dma_attrs *attrs) 555 struct dma_attrs *attrs)
552{ 556{
553 struct vio_dev *viodev = to_vio_dev(dev); 557 struct vio_dev *viodev = to_vio_dev(dev);
558 struct iommu_table *tbl;
554 struct scatterlist *sgl; 559 struct scatterlist *sgl;
555 int ret, count = 0; 560 int ret, count = 0;
556 size_t alloc_size = 0; 561 size_t alloc_size = 0;
557 562
563 tbl = get_iommu_table_base(dev);
558 for (sgl = sglist; count < nelems; count++, sgl++) 564 for (sgl = sglist; count < nelems; count++, sgl++)
559 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE); 565 alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
560 566
561 if (vio_cmo_alloc(viodev, alloc_size)) { 567 if (vio_cmo_alloc(viodev, alloc_size)) {
562 atomic_inc(&viodev->cmo.allocs_failed); 568 atomic_inc(&viodev->cmo.allocs_failed);
@@ -572,7 +578,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
572 } 578 }
573 579
574 for (sgl = sglist, count = 0; count < ret; count++, sgl++) 580 for (sgl = sglist, count = 0; count < ret; count++, sgl++)
575 alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE); 581 alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
576 if (alloc_size) 582 if (alloc_size)
577 vio_cmo_dealloc(viodev, alloc_size); 583 vio_cmo_dealloc(viodev, alloc_size);
578 584
@@ -585,12 +591,14 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
585 struct dma_attrs *attrs) 591 struct dma_attrs *attrs)
586{ 592{
587 struct vio_dev *viodev = to_vio_dev(dev); 593 struct vio_dev *viodev = to_vio_dev(dev);
594 struct iommu_table *tbl;
588 struct scatterlist *sgl; 595 struct scatterlist *sgl;
589 size_t alloc_size = 0; 596 size_t alloc_size = 0;
590 int count = 0; 597 int count = 0;
591 598
599 tbl = get_iommu_table_base(dev);
592 for (sgl = sglist; count < nelems; count++, sgl++) 600 for (sgl = sglist; count < nelems; count++, sgl++)
593 alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE); 601 alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
594 602
595 dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs); 603 dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
596 604
@@ -706,11 +714,14 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
706{ 714{
707 struct vio_cmo_dev_entry *dev_ent; 715 struct vio_cmo_dev_entry *dev_ent;
708 struct device *dev = &viodev->dev; 716 struct device *dev = &viodev->dev;
717 struct iommu_table *tbl;
709 struct vio_driver *viodrv = to_vio_driver(dev->driver); 718 struct vio_driver *viodrv = to_vio_driver(dev->driver);
710 unsigned long flags; 719 unsigned long flags;
711 size_t size; 720 size_t size;
712 bool dma_capable = false; 721 bool dma_capable = false;
713 722
723 tbl = get_iommu_table_base(dev);
724
714 /* A device requires entitlement if it has a DMA window property */ 725 /* A device requires entitlement if it has a DMA window property */
715 switch (viodev->family) { 726 switch (viodev->family) {
716 case VDEVICE: 727 case VDEVICE:
@@ -736,7 +747,8 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
736 return -EINVAL; 747 return -EINVAL;
737 } 748 }
738 749
739 viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev)); 750 viodev->cmo.desired =
751 IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
740 if (viodev->cmo.desired < VIO_CMO_MIN_ENT) 752 if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
741 viodev->cmo.desired = VIO_CMO_MIN_ENT; 753 viodev->cmo.desired = VIO_CMO_MIN_ENT;
742 size = VIO_CMO_MIN_ENT; 754 size = VIO_CMO_MIN_ENT;
@@ -1176,9 +1188,10 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
1176 &tbl->it_index, &offset, &size); 1188 &tbl->it_index, &offset, &size);
1177 1189
1178 /* TCE table size - measured in tce entries */ 1190 /* TCE table size - measured in tce entries */
1179 tbl->it_size = size >> IOMMU_PAGE_SHIFT; 1191 tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
1192 tbl->it_size = size >> tbl->it_page_shift;
1180 /* offset for VIO should always be 0 */ 1193 /* offset for VIO should always be 0 */
1181 tbl->it_offset = offset >> IOMMU_PAGE_SHIFT; 1194 tbl->it_offset = offset >> tbl->it_page_shift;
1182 tbl->it_busno = 0; 1195 tbl->it_busno = 0;
1183 tbl->it_type = TCE_VB; 1196 tbl->it_type = TCE_VB;
1184 tbl->it_blocksize = 16; 1197 tbl->it_blocksize = 16;