author:    Alistair Popple <alistair@popple.id.au>  2013-12-09 02:17:01 -0500
committer: Benjamin Herrenschmidt <benh@kernel.crashing.org>  2013-12-29 22:17:06 -0500
commit:    e589a4404fa06730355de204d3d136ed9bbc7dea
tree:      a7b4d1dad98a06d89e652194947735db7eec02b5 /arch/powerpc/kernel/vio.c
parent:    fee26f6d5d68a8815b20c32d15dd70d5384eb937
powerpc/iommu: Update constant names to reflect their hardcoded page size
The powerpc iommu uses a hardcoded page size of 4K. This patch changes
the name of the IOMMU_PAGE_* macros to reflect the hardcoded values. A
future patch will use the existing names to support dynamic page sizes.

Signed-off-by: Alistair Popple <alistair@popple.id.au>
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
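For reference, the renamed helpers in arch/powerpc/include/asm/iommu.h
should end up looking roughly like the sketch below (reconstructed from
the description above, not copied from the patch; ASM_CONST and
_ALIGN_UP are assumed to be the existing powerpc helper macros):

	/* Hardcoded 4K IOMMU page size, now explicit in the names. */
	#define IOMMU_PAGE_SHIFT_4K	12
	#define IOMMU_PAGE_SIZE_4K	(ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
	#define IOMMU_PAGE_MASK_4K	(~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
	#define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)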
Diffstat (limited to 'arch/powerpc/kernel/vio.c')
 arch/powerpc/kernel/vio.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 76a64821f4a2..2e89fa350763 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -520,14 +520,14 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
 	struct vio_dev *viodev = to_vio_dev(dev);
 	dma_addr_t ret = DMA_ERROR_CODE;
 
-	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
+	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K))) {
 		atomic_inc(&viodev->cmo.allocs_failed);
 		return ret;
 	}
 
 	ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
 	if (unlikely(dma_mapping_error(dev, ret))) {
-		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
 		atomic_inc(&viodev->cmo.allocs_failed);
 	}
 
@@ -543,7 +543,7 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
 
 	dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
 
-	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
 }
 
 static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -556,7 +556,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 	size_t alloc_size = 0;
 
 	for (sgl = sglist; count < nelems; count++, sgl++)
-		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);
+		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE_4K);
 
 	if (vio_cmo_alloc(viodev, alloc_size)) {
 		atomic_inc(&viodev->cmo.allocs_failed);
@@ -572,7 +572,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 	}
 
 	for (sgl = sglist, count = 0; count < ret; count++, sgl++)
-		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
+		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);
 	if (alloc_size)
 		vio_cmo_dealloc(viodev, alloc_size);
 
@@ -590,7 +590,7 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
 	int count = 0;
 
 	for (sgl = sglist; count < nelems; count++, sgl++)
-		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
+		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);
 
 	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
 
@@ -736,7 +736,8 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
 		return -EINVAL;
 	}
 
-	viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
+	viodev->cmo.desired =
+		IOMMU_PAGE_ALIGN_4K(viodrv->get_desired_dma(viodev));
 	if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
 		viodev->cmo.desired = VIO_CMO_MIN_ENT;
 	size = VIO_CMO_MIN_ENT;
@@ -1176,9 +1177,9 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
 			&tbl->it_index, &offset, &size);
 
 	/* TCE table size - measured in tce entries */
-	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
+	tbl->it_size = size >> IOMMU_PAGE_SHIFT_4K;
 	/* offset for VIO should always be 0 */
-	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
+	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT_4K;
 	tbl->it_busno = 0;
 	tbl->it_type = TCE_VB;
 	tbl->it_blocksize = 16;
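The CMO accounting in the hunks above charges every mapping in whole 4K
IOMMU pages. A minimal user-space sketch of that arithmetic (standalone
C, not kernel code; roundup_4k mirrors the kernel's roundup() for this
power-of-two page size):

	#include <stdio.h>

	#define IOMMU_PAGE_SHIFT_4K	12
	#define IOMMU_PAGE_SIZE_4K	(1UL << IOMMU_PAGE_SHIFT_4K)

	/* Round size up to a whole number of 4K IOMMU pages. */
	static unsigned long roundup_4k(unsigned long size)
	{
		return (size + IOMMU_PAGE_SIZE_4K - 1) &
			~(IOMMU_PAGE_SIZE_4K - 1);
	}

	int main(void)
	{
		printf("%lu\n", roundup_4k(5000)); /* 8192: two 4K pages */
		printf("%lu\n", roundup_4k(4096)); /* 4096: exactly one page */
		return 0;
	}

So a 5000-byte map_page call, for example, debits 8192 bytes of the
device's CMO entitlement, and the matching unmap credits the same
amount back.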