-rw-r--r--  arch/powerpc/include/asm/iommu.h       | 10
-rw-r--r--  arch/powerpc/kernel/dma-iommu.c        |  4
-rw-r--r--  arch/powerpc/kernel/iommu.c            | 78
-rw-r--r--  arch/powerpc/kernel/vio.c              | 19
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c    | 12
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c   |  4
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c |  8
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c |  4
-rw-r--r--  arch/powerpc/platforms/wsp/wsp_pci.c   | 10
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c     |  9
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c    | 28
11 files changed, 94 insertions(+), 92 deletions(-)
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 774fa2776907..0869c7e74421 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -30,10 +30,10 @@
 #include <asm/machdep.h>
 #include <asm/types.h>
 
-#define IOMMU_PAGE_SHIFT	12
-#define IOMMU_PAGE_SIZE		(ASM_CONST(1) << IOMMU_PAGE_SHIFT)
-#define IOMMU_PAGE_MASK		(~((1 << IOMMU_PAGE_SHIFT) - 1))
-#define IOMMU_PAGE_ALIGN(addr)	_ALIGN_UP(addr, IOMMU_PAGE_SIZE)
+#define IOMMU_PAGE_SHIFT_4K	12
+#define IOMMU_PAGE_SIZE_4K	(ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
+#define IOMMU_PAGE_MASK_4K	(~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
+#define IOMMU_PAGE_ALIGN_4K(addr)	_ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)
 
 /* Boot time flags */
 extern int iommu_is_off;
@@ -42,7 +42,7 @@ extern int iommu_force_on;
 /* Pure 2^n version of get_order */
 static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
 {
-	return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT) + 1;
+	return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT_4K) + 1;
 }
 
 
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index e4897523de41..5cfe3dbfc4a0 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -83,10 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 		return 0;
 	}
 
-	if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) {
+	if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT_4K)) {
 		dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
 		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
-				mask, tbl->it_offset << IOMMU_PAGE_SHIFT);
+				mask, tbl->it_offset << IOMMU_PAGE_SHIFT_4K);
 		return 0;
 	} else
 		return 1;
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d22abe0b08e3..df4a7f1b7444 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -251,14 +251,14 @@ again:
 
 	if (dev)
 		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				      1 << IOMMU_PAGE_SHIFT);
+				      1 << IOMMU_PAGE_SHIFT_4K);
 	else
-		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
+		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT_4K);
 	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
 
 	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
-			tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
+			tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT_4K,
 			align_mask);
 	if (n == -1) {
 		if (likely(pass == 0)) {
 			/* First try the pool from the start */
@@ -320,12 +320,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 		return DMA_ERROR_CODE;
 
 	entry += tbl->it_offset;	/* Offset into real TCE table */
-	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */
+	ret = entry << IOMMU_PAGE_SHIFT_4K;	/* Set the return dma address */
 
 	/* Put the TCEs in the HW table */
 	build_fail = ppc_md.tce_build(tbl, entry, npages,
-				      (unsigned long)page & IOMMU_PAGE_MASK,
+				      (unsigned long)page & IOMMU_PAGE_MASK_4K,
 				      direction, attrs);
 
 	/* ppc_md.tce_build() only returns non-zero for transient errors.
 	 * Clean up the table bitmap in this case and return
@@ -352,7 +352,7 @@ static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
 {
 	unsigned long entry, free_entry;
 
-	entry = dma_addr >> IOMMU_PAGE_SHIFT;
+	entry = dma_addr >> IOMMU_PAGE_SHIFT_4K;
 	free_entry = entry - tbl->it_offset;
 
 	if (((free_entry + npages) > tbl->it_size) ||
@@ -401,7 +401,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	unsigned long flags;
 	struct iommu_pool *pool;
 
-	entry = dma_addr >> IOMMU_PAGE_SHIFT;
+	entry = dma_addr >> IOMMU_PAGE_SHIFT_4K;
 	free_entry = entry - tbl->it_offset;
 
 	pool = get_pool(tbl, free_entry);
@@ -468,13 +468,13 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		}
 		/* Allocate iommu entries for that segment */
 		vaddr = (unsigned long) sg_virt(s);
-		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
+		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE_4K);
 		align = 0;
-		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
+		if (IOMMU_PAGE_SHIFT_4K < PAGE_SHIFT && slen >= PAGE_SIZE &&
 		    (vaddr & ~PAGE_MASK) == 0)
-			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT_4K;
 		entry = iommu_range_alloc(dev, tbl, npages, &handle,
-					  mask >> IOMMU_PAGE_SHIFT, align);
+					  mask >> IOMMU_PAGE_SHIFT_4K, align);
 
 		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -489,16 +489,16 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 
 		/* Convert entry to a dma_addr_t */
 		entry += tbl->it_offset;
-		dma_addr = entry << IOMMU_PAGE_SHIFT;
-		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);
+		dma_addr = entry << IOMMU_PAGE_SHIFT_4K;
+		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK_4K);
 
 		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
 			    npages, entry, dma_addr);
 
 		/* Insert into HW table */
 		build_fail = ppc_md.tce_build(tbl, entry, npages,
-					      vaddr & IOMMU_PAGE_MASK,
+					      vaddr & IOMMU_PAGE_MASK_4K,
 					      direction, attrs);
 		if(unlikely(build_fail))
 			goto failure;
 
@@ -559,9 +559,9 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		if (s->dma_length != 0) {
 			unsigned long vaddr, npages;
 
-			vaddr = s->dma_address & IOMMU_PAGE_MASK;
+			vaddr = s->dma_address & IOMMU_PAGE_MASK_4K;
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
-						 IOMMU_PAGE_SIZE);
+						 IOMMU_PAGE_SIZE_4K);
 			__iommu_free(tbl, vaddr, npages);
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -592,7 +592,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		if (sg->dma_length == 0)
 			break;
 		npages = iommu_num_pages(dma_handle, sg->dma_length,
-					 IOMMU_PAGE_SIZE);
+					 IOMMU_PAGE_SIZE_4K);
 		__iommu_free(tbl, dma_handle, npages);
 		sg = sg_next(sg);
 	}
@@ -676,7 +676,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 		set_bit(0, tbl->it_map);
 
 	/* We only split the IOMMU table if we have 1GB or more of space */
-	if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024))
+	if ((tbl->it_size << IOMMU_PAGE_SHIFT_4K) >= (1UL * 1024 * 1024 * 1024))
 		tbl->nr_pools = IOMMU_NR_POOLS;
 	else
 		tbl->nr_pools = 1;
@@ -768,16 +768,16 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 
 	vaddr = page_address(page) + offset;
 	uaddr = (unsigned long)vaddr;
-	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
+	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE_4K);
 
 	if (tbl) {
 		align = 0;
-		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
+		if (IOMMU_PAGE_SHIFT_4K < PAGE_SHIFT && size >= PAGE_SIZE &&
 		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
-			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT_4K;
 
 		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
-					 mask >> IOMMU_PAGE_SHIFT, align,
+					 mask >> IOMMU_PAGE_SHIFT_4K, align,
 					 attrs);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit())  {
@@ -786,7 +786,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 					 npages);
 			}
 		} else
-			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
+			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK_4K);
 	}
 
 	return dma_handle;
@@ -801,7 +801,7 @@ void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
 	BUG_ON(direction == DMA_NONE);
 
 	if (tbl) {
-		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
+		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE_4K);
 		iommu_free(tbl, dma_handle, npages);
 	}
 }
@@ -845,10 +845,10 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	memset(ret, 0, size);
 
 	/* Set up tces to cover the allocated range */
-	nio_pages = size >> IOMMU_PAGE_SHIFT;
+	nio_pages = size >> IOMMU_PAGE_SHIFT_4K;
 	io_order = get_iommu_order(size);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
-			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
+			      mask >> IOMMU_PAGE_SHIFT_4K, io_order, NULL);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
 		return NULL;
@@ -864,7 +864,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 		unsigned int nio_pages;
 
 		size = PAGE_ALIGN(size);
-		nio_pages = size >> IOMMU_PAGE_SHIFT;
+		nio_pages = size >> IOMMU_PAGE_SHIFT_4K;
 		iommu_free(tbl, dma_handle, nio_pages);
 		size = PAGE_ALIGN(size);
 		free_pages((unsigned long)vaddr, get_order(size));
@@ -935,10 +935,10 @@ int iommu_tce_clear_param_check(struct iommu_table *tbl,
 	if (tce_value)
 		return -EINVAL;
 
-	if (ioba & ~IOMMU_PAGE_MASK)
+	if (ioba & ~IOMMU_PAGE_MASK_4K)
 		return -EINVAL;
 
-	ioba >>= IOMMU_PAGE_SHIFT;
+	ioba >>= IOMMU_PAGE_SHIFT_4K;
 	if (ioba < tbl->it_offset)
 		return -EINVAL;
 
@@ -955,13 +955,13 @@ int iommu_tce_put_param_check(struct iommu_table *tbl,
 	if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
 		return -EINVAL;
 
-	if (tce & ~(IOMMU_PAGE_MASK | TCE_PCI_WRITE | TCE_PCI_READ))
+	if (tce & ~(IOMMU_PAGE_MASK_4K | TCE_PCI_WRITE | TCE_PCI_READ))
 		return -EINVAL;
 
-	if (ioba & ~IOMMU_PAGE_MASK)
+	if (ioba & ~IOMMU_PAGE_MASK_4K)
 		return -EINVAL;
 
-	ioba >>= IOMMU_PAGE_SHIFT;
+	ioba >>= IOMMU_PAGE_SHIFT_4K;
 	if (ioba < tbl->it_offset)
 		return -EINVAL;
 
@@ -1037,7 +1037,7 @@ int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
 
 	/* if (unlikely(ret))
 		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
-				__func__, hwaddr, entry << IOMMU_PAGE_SHIFT,
+				__func__, hwaddr, entry << IOMMU_PAGE_SHIFT_4K,
 				hwaddr, ret); */
 
 	return ret;
@@ -1049,14 +1049,14 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
 {
 	int ret;
 	struct page *page = NULL;
-	unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK & ~PAGE_MASK;
+	unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK_4K & ~PAGE_MASK;
 	enum dma_data_direction direction = iommu_tce_direction(tce);
 
 	ret = get_user_pages_fast(tce & PAGE_MASK, 1,
 			direction != DMA_TO_DEVICE, &page);
 	if (unlikely(ret != 1)) {
 		/* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
-				tce, entry << IOMMU_PAGE_SHIFT, ret); */
+				tce, entry << IOMMU_PAGE_SHIFT_4K, ret); */
 		return -EFAULT;
 	}
 	hwaddr = (unsigned long) page_address(page) + offset;
@@ -1067,7 +1067,7 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
 
 	if (ret < 0)
 		pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
-				__func__, entry << IOMMU_PAGE_SHIFT, tce, ret);
+				__func__, entry << IOMMU_PAGE_SHIFT_4K, tce, ret);
 
 	return ret;
 }
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 76a64821f4a2..2e89fa350763 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -520,14 +520,14 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
 	struct vio_dev *viodev = to_vio_dev(dev);
 	dma_addr_t ret = DMA_ERROR_CODE;
 
-	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
+	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K))) {
 		atomic_inc(&viodev->cmo.allocs_failed);
 		return ret;
 	}
 
 	ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
 	if (unlikely(dma_mapping_error(dev, ret))) {
-		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
 		atomic_inc(&viodev->cmo.allocs_failed);
 	}
 
@@ -543,7 +543,7 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
 
 	dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
 
-	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
+	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
 }
 
 static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -556,7 +556,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 	size_t alloc_size = 0;
 
 	for (sgl = sglist; count < nelems; count++, sgl++)
-		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);
+		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE_4K);
 
 	if (vio_cmo_alloc(viodev, alloc_size)) {
 		atomic_inc(&viodev->cmo.allocs_failed);
@@ -572,7 +572,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 	}
 
 	for (sgl = sglist, count = 0; count < ret; count++, sgl++)
-		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
+		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);
 	if (alloc_size)
 		vio_cmo_dealloc(viodev, alloc_size);
 
@@ -590,7 +590,7 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
 	int count = 0;
 
 	for (sgl = sglist; count < nelems; count++, sgl++)
-		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
+		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);
 
 	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
 
@@ -736,7 +736,8 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
 		return -EINVAL;
 	}
 
-	viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
+	viodev->cmo.desired =
+		IOMMU_PAGE_ALIGN_4K(viodrv->get_desired_dma(viodev));
 	if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
 		viodev->cmo.desired = VIO_CMO_MIN_ENT;
 	size = VIO_CMO_MIN_ENT;
@@ -1176,9 +1177,9 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
 				&tbl->it_index, &offset, &size);
 
 	/* TCE table size - measured in tce entries */
-	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
+	tbl->it_size = size >> IOMMU_PAGE_SHIFT_4K;
 	/* offset for VIO should always be 0 */
-	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
+	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT_4K;
 	tbl->it_busno = 0;
 	tbl->it_type = TCE_VB;
 	tbl->it_blocksize = 16;
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index b53560660b72..fc61b908eaf0 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
 
 	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
 
-	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
+	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE_4K)
 		io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
 
 	mb();
@@ -430,7 +430,7 @@ static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
 {
 	cell_iommu_setup_stab(iommu, base, size, 0, 0);
 	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
-					    IOMMU_PAGE_SHIFT);
+					    IOMMU_PAGE_SHIFT_4K);
 	cell_iommu_enable_hardware(iommu);
 }
 
@@ -487,8 +487,8 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
 	window->table.it_blocksize = 16;
 	window->table.it_base = (unsigned long)iommu->ptab;
 	window->table.it_index = iommu->nid;
-	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
-	window->table.it_size = size >> IOMMU_PAGE_SHIFT;
+	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT_4K) + pte_offset;
+	window->table.it_size = size >> IOMMU_PAGE_SHIFT_4K;
 
 	iommu_init_table(&window->table, iommu->nid);
 
@@ -773,7 +773,7 @@ static void __init cell_iommu_init_one(struct device_node *np,
 
 	/* Setup the iommu_table */
 	cell_iommu_setup_window(iommu, np, base, size,
-				offset >> IOMMU_PAGE_SHIFT);
+				offset >> IOMMU_PAGE_SHIFT_4K);
 }
 
 static void __init cell_disable_iommus(void)
@@ -1122,7 +1122,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
 
 	cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
 	iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
-					    IOMMU_PAGE_SHIFT);
+					    IOMMU_PAGE_SHIFT_4K);
 	cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
 				    fbase, fsize);
 	cell_iommu_enable_hardware(iommu);
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index bac289aac7cc..7f4d857668a9 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -564,7 +564,7 @@ void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
 {
 	tbl->it_blocksize = 16;
 	tbl->it_base = (unsigned long)tce_mem;
-	tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT;
+	tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT_4K;
 	tbl->it_index = 0;
 	tbl->it_size = tce_size >> 3;
 	tbl->it_busno = 0;
@@ -761,7 +761,7 @@ static struct notifier_block tce_iommu_bus_nb = {
 
 static int __init tce_iommu_bus_notifier_init(void)
 {
-	BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE);
+	BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE_4K);
 
 	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
 	return 0;
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index a80af6c20cba..1b7531ce0c0c 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -488,7 +488,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
 	tbl->it_busno = phb->bus->number;
 
 	/* Units of tce entries */
-	tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT;
+	tbl->it_offset = phb->dma_window_base_cur >> IOMMU_PAGE_SHIFT_4K;
 
 	/* Test if we are going over 2GB of DMA space */
 	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
@@ -499,7 +499,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
 	phb->dma_window_base_cur += phb->dma_window_size;
 
 	/* Set the tce table size - measured in entries */
-	tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT;
+	tbl->it_size = phb->dma_window_size >> IOMMU_PAGE_SHIFT_4K;
 
 	tbl->it_index = 0;
 	tbl->it_blocksize = 16;
@@ -540,8 +540,8 @@ static void iommu_table_setparms_lpar(struct pci_controller *phb,
 	tbl->it_base = 0;
 	tbl->it_blocksize = 16;
 	tbl->it_type = TCE_PCI;
-	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
-	tbl->it_size = size >> IOMMU_PAGE_SHIFT;
+	tbl->it_offset = offset >> IOMMU_PAGE_SHIFT_4K;
+	tbl->it_size = size >> IOMMU_PAGE_SHIFT_4K;
 }
 
 static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index c1f190858701..49cd16e6b450 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -72,7 +72,7 @@
 
 int CMO_PrPSP = -1;
 int CMO_SecPSP = -1;
-unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT);
+unsigned long CMO_PageSize = (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K);
 EXPORT_SYMBOL(CMO_PageSize);
 
 int fwnmi_active;  /* TRUE if an FWNMI handler is present */
@@ -569,7 +569,7 @@ void pSeries_cmo_feature_init(void)
 {
 	char *ptr, *key, *value, *end;
 	int call_status;
-	int page_order = IOMMU_PAGE_SHIFT;
+	int page_order = IOMMU_PAGE_SHIFT_4K;
 
 	pr_debug(" -> fw_cmo_feature_init()\n");
 	spin_lock(&rtas_data_buf_lock);
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
index 62cb527493e7..8a589618551e 100644
--- a/arch/powerpc/platforms/wsp/wsp_pci.c
+++ b/arch/powerpc/platforms/wsp/wsp_pci.c
@@ -260,7 +260,7 @@ static int tce_build_wsp(struct iommu_table *tbl, long index, long npages,
 		*tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
 
 		dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n",
-			  tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT);
+			  tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT_4K);
 
 		uaddr += TCE_PAGE_SIZE;
 		index++;
@@ -381,8 +381,8 @@ static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb,
 
 	/* Init bits and pieces */
 	tbl->table.it_blocksize = 16;
-	tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT;
-	tbl->table.it_size = size >> IOMMU_PAGE_SHIFT;
+	tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT_4K;
+	tbl->table.it_size = size >> IOMMU_PAGE_SHIFT_4K;
 
 	/*
 	 * It's already blank but we clear it anyway.
@@ -449,8 +449,8 @@ static void wsp_pci_dma_dev_setup(struct pci_dev *pdev)
 	if (table) {
 		pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n",
 			pci_name(pdev),
-			table->table.it_offset << IOMMU_PAGE_SHIFT,
-			(table->table.it_offset << IOMMU_PAGE_SHIFT)
+			table->table.it_offset << IOMMU_PAGE_SHIFT_4K,
+			(table->table.it_offset << IOMMU_PAGE_SHIFT_4K)
 			+ phb->dma32_region_size - 1);
 		archdata->dma_data.iommu_table_base = &table->table;
 		return;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 952d795230a4..f7d7538b6bd9 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1282,24 +1282,25 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
 
 	/* netdev inits at probe time along with the structures we need below*/
 	if (netdev == NULL)
-		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+		return IOMMU_PAGE_ALIGN_4K(IBMVETH_IO_ENTITLEMENT_DEFAULT);
 
 	adapter = netdev_priv(netdev);
 
 	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
-	ret += IOMMU_PAGE_ALIGN(netdev->mtu);
+	ret += IOMMU_PAGE_ALIGN_4K(netdev->mtu);
 
 	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 		/* add the size of the active receive buffers */
 		if (adapter->rx_buff_pool[i].active)
 			ret +=
 			    adapter->rx_buff_pool[i].size *
-			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
+			    IOMMU_PAGE_ALIGN_4K(adapter->rx_buff_pool[i].
 			            buff_size);
 		rxqentries += adapter->rx_buff_pool[i].size;
 	}
 	/* add the size of the receive queue entries */
-	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));
+	ret += IOMMU_PAGE_ALIGN_4K(
+		rxqentries * sizeof(struct ibmveth_rx_q_entry));
 
 	return ret;
 }
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index bdae7a04af75..a84788ba662c 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -81,7 +81,7 @@ static int tce_iommu_enable(struct tce_container *container)
 	 * enforcing the limit based on the max that the guest can map.
 	 */
 	down_write(&current->mm->mmap_sem);
-	npages = (tbl->it_size << IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+	npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
 	locked = current->mm->locked_vm + npages;
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
@@ -110,7 +110,7 @@ static void tce_iommu_disable(struct tce_container *container)
 
 	down_write(&current->mm->mmap_sem);
 	current->mm->locked_vm -= (container->tbl->it_size <<
-			IOMMU_PAGE_SHIFT) >> PAGE_SHIFT;
+			IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
 	up_write(&current->mm->mmap_sem);
 }
 
@@ -174,8 +174,8 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (info.argsz < minsz)
 			return -EINVAL;
 
-		info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT;
-		info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT;
+		info.dma32_window_start = tbl->it_offset << IOMMU_PAGE_SHIFT_4K;
+		info.dma32_window_size = tbl->it_size << IOMMU_PAGE_SHIFT_4K;
 		info.flags = 0;
 
 		if (copy_to_user((void __user *)arg, &info, minsz))
@@ -205,8 +205,8 @@ static long tce_iommu_ioctl(void *iommu_data,
 				VFIO_DMA_MAP_FLAG_WRITE))
 			return -EINVAL;
 
-		if ((param.size & ~IOMMU_PAGE_MASK) ||
-				(param.vaddr & ~IOMMU_PAGE_MASK))
+		if ((param.size & ~IOMMU_PAGE_MASK_4K) ||
+				(param.vaddr & ~IOMMU_PAGE_MASK_4K))
 			return -EINVAL;
 
 		/* iova is checked by the IOMMU API */
@@ -220,17 +220,17 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (ret)
 			return ret;
 
-		for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT); ++i) {
+		for (i = 0; i < (param.size >> IOMMU_PAGE_SHIFT_4K); ++i) {
 			ret = iommu_put_tce_user_mode(tbl,
-					(param.iova >> IOMMU_PAGE_SHIFT) + i,
+					(param.iova >> IOMMU_PAGE_SHIFT_4K) + i,
 					tce);
 			if (ret)
 				break;
-			tce += IOMMU_PAGE_SIZE;
+			tce += IOMMU_PAGE_SIZE_4K;
 		}
 		if (ret)
 			iommu_clear_tces_and_put_pages(tbl,
-					param.iova >> IOMMU_PAGE_SHIFT, i);
+					param.iova >> IOMMU_PAGE_SHIFT_4K, i);
 
 		iommu_flush_tce(tbl);
 
@@ -256,17 +256,17 @@ static long tce_iommu_ioctl(void *iommu_data,
 		if (param.flags)
 			return -EINVAL;
 
-		if (param.size & ~IOMMU_PAGE_MASK)
+		if (param.size & ~IOMMU_PAGE_MASK_4K)
 			return -EINVAL;
 
 		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
-				param.size >> IOMMU_PAGE_SHIFT);
+				param.size >> IOMMU_PAGE_SHIFT_4K);
 		if (ret)
 			return ret;
 
 		ret = iommu_clear_tces_and_put_pages(tbl,
-				param.iova >> IOMMU_PAGE_SHIFT,
-				param.size >> IOMMU_PAGE_SHIFT);
+				param.iova >> IOMMU_PAGE_SHIFT_4K,
+				param.size >> IOMMU_PAGE_SHIFT_4K);
 		iommu_flush_tce(tbl);
 
 		return ret;