author	Alistair Popple <alistair@popple.id.au>	2013-12-09 02:17:03 -0500
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-12-29 22:17:19 -0500
commit	d084775738b746648d4102337163a04534a02982 (patch)
tree	641f5ea99dcdb27f51a7646d56a8d245f1d7f9c1
parent	3a553170d35d69bea3877bffa508489dfa6f133d (diff)
powerpc/iommu: Update the generic code to use dynamic iommu page sizes
This patch updates the generic iommu backend code to use the it_page_shift
field to determine the iommu page size instead of using hardcoded values.

Signed-off-by: Alistair Popple <alistair@popple.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--	arch/powerpc/include/asm/iommu.h	19
-rw-r--r--	arch/powerpc/kernel/dma-iommu.c	4
-rw-r--r--	arch/powerpc/kernel/iommu.c	88
-rw-r--r--	arch/powerpc/kernel/vio.c	25
-rw-r--r--	arch/powerpc/platforms/powernv/pci.c	2
-rw-r--r--	drivers/net/ethernet/ibm/ibmveth.c	15
6 files changed, 88 insertions(+), 65 deletions(-)
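The core of the change is the new set of per-table helpers in arch/powerpc/include/asm/iommu.h: page size, mask and alignment are derived from the table's it_page_shift field instead of the fixed IOMMU_PAGE_*_4K constants, and get_iommu_order() now takes the table as an argument. The following standalone user-space sketch (not kernel code; the cut-down struct iommu_table_stub, the 1UL shifts and the example length are invented for illustration) mirrors those helpers to show how the same request is sized differently for a 4K and a 64K TCE table:

#include <stdio.h>

/*
 * Standalone sketch only: mirrors the per-table helpers added by this
 * patch.  The real macros live in arch/powerpc/include/asm/iommu.h and
 * operate on struct iommu_table; this stub stands in for that struct.
 */
struct iommu_table_stub {
	unsigned int it_page_shift;	/* e.g. 12 for 4K TCEs, 16 for 64K TCEs */
};

#define IOMMU_PAGE_SIZE(tblptr)  (1UL << (tblptr)->it_page_shift)
#define IOMMU_PAGE_MASK(tblptr)  (~((1UL << (tblptr)->it_page_shift) - 1))
#define IOMMU_PAGE_ALIGN(addr, tblptr) \
	(((addr) + IOMMU_PAGE_SIZE(tblptr) - 1) & IOMMU_PAGE_MASK(tblptr))

int main(void)
{
	struct iommu_table_stub tbl4k  = { .it_page_shift = 12 };
	struct iommu_table_stub tbl64k = { .it_page_shift = 16 };
	unsigned long len = 150000;	/* arbitrary DMA buffer length */

	/* The same request rounds up to a different size depending on
	 * the page size of the table it is mapped through. */
	printf("4K  table: page size %lu, aligned length %lu\n",
	       IOMMU_PAGE_SIZE(&tbl4k), IOMMU_PAGE_ALIGN(len, &tbl4k));
	printf("64K table: page size %lu, aligned length %lu\n",
	       IOMMU_PAGE_SIZE(&tbl64k), IOMMU_PAGE_ALIGN(len, &tbl64k));
	return 0;
}

Built with any C compiler, the sketch reports a 4K-aligned and a 64K-aligned length for the same 150000-byte request, which is the per-table behaviour the generic code below now gets from IOMMU_PAGE_SIZE(tbl) and friends.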
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 7c928342f3f5..f7a8036579b5 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -35,17 +35,14 @@
 #define IOMMU_PAGE_MASK_4K (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
 #define IOMMU_PAGE_ALIGN_4K(addr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE_4K)
 
+#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
+#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
+#define IOMMU_PAGE_ALIGN(addr, tblptr) _ALIGN_UP(addr, IOMMU_PAGE_SIZE(tblptr))
+
 /* Boot time flags */
 extern int iommu_is_off;
 extern int iommu_force_on;
 
-/* Pure 2^n version of get_order */
-static __inline__ __attribute_const__ int get_iommu_order(unsigned long size)
-{
-	return __ilog2((size - 1) >> IOMMU_PAGE_SHIFT_4K) + 1;
-}
-
-
 /*
  * IOMAP_MAX_ORDER defines the largest contiguous block
  * of dma space we can get.  IOMAP_MAX_ORDER = 13
@@ -82,6 +79,14 @@ struct iommu_table {
 #endif
 };
 
+/* Pure 2^n version of get_order */
+static inline __attribute_const__
+int get_iommu_order(unsigned long size, struct iommu_table *tbl)
+{
+	return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
+}
+
+
 struct scatterlist;
 
 static inline void set_iommu_table_base(struct device *dev, void *base)
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
index 5cfe3dbfc4a0..54d0116256f7 100644
--- a/arch/powerpc/kernel/dma-iommu.c
+++ b/arch/powerpc/kernel/dma-iommu.c
@@ -83,10 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask)
 		return 0;
 	}
 
-	if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT_4K)) {
+	if (tbl->it_offset > (mask >> tbl->it_page_shift)) {
 		dev_info(dev, "Warning: IOMMU offset too big for device mask\n");
 		dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n",
-				mask, tbl->it_offset << IOMMU_PAGE_SHIFT_4K);
+				mask, tbl->it_offset << tbl->it_page_shift);
 		return 0;
 	} else
 		return 1;
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index df4a7f1b7444..f58d8135aab2 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -251,14 +251,13 @@ again:
 
 	if (dev)
 		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				      1 << IOMMU_PAGE_SHIFT_4K);
+				      1 << tbl->it_page_shift);
 	else
-		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT_4K);
+		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
 	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
 
-	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
-			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT_4K,
-			     align_mask);
+	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
+			     boundary_size >> tbl->it_page_shift, align_mask);
 	if (n == -1) {
 		if (likely(pass == 0)) {
 			/* First try the pool from the start */
@@ -320,12 +319,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 		return DMA_ERROR_CODE;
 
 	entry += tbl->it_offset;	/* Offset into real TCE table */
-	ret = entry << IOMMU_PAGE_SHIFT_4K;	/* Set the return dma address */
+	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
 
 	/* Put the TCEs in the HW table */
 	build_fail = ppc_md.tce_build(tbl, entry, npages,
-			(unsigned long)page & IOMMU_PAGE_MASK_4K,
-			direction, attrs);
+			(unsigned long)page &
+			IOMMU_PAGE_MASK(tbl), direction, attrs);
 
 	/* ppc_md.tce_build() only returns non-zero for transient errors.
 	 * Clean up the table bitmap in this case and return
@@ -352,7 +351,7 @@ static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
 {
 	unsigned long entry, free_entry;
 
-	entry = dma_addr >> IOMMU_PAGE_SHIFT_4K;
+	entry = dma_addr >> tbl->it_page_shift;
 	free_entry = entry - tbl->it_offset;
 
 	if (((free_entry + npages) > tbl->it_size) ||
@@ -401,7 +400,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	unsigned long flags;
 	struct iommu_pool *pool;
 
-	entry = dma_addr >> IOMMU_PAGE_SHIFT_4K;
+	entry = dma_addr >> tbl->it_page_shift;
 	free_entry = entry - tbl->it_offset;
 
 	pool = get_pool(tbl, free_entry);
@@ -468,13 +467,13 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		}
 		/* Allocate iommu entries for that segment */
 		vaddr = (unsigned long) sg_virt(s);
-		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE_4K);
+		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
 		align = 0;
-		if (IOMMU_PAGE_SHIFT_4K < PAGE_SHIFT && slen >= PAGE_SIZE &&
+		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
 		    (vaddr & ~PAGE_MASK) == 0)
-			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT_4K;
+			align = PAGE_SHIFT - tbl->it_page_shift;
 		entry = iommu_range_alloc(dev, tbl, npages, &handle,
-					  mask >> IOMMU_PAGE_SHIFT_4K, align);
+					  mask >> tbl->it_page_shift, align);
 
 		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -489,16 +488,16 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 
 		/* Convert entry to a dma_addr_t */
 		entry += tbl->it_offset;
-		dma_addr = entry << IOMMU_PAGE_SHIFT_4K;
-		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK_4K);
+		dma_addr = entry << tbl->it_page_shift;
+		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
 
 		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
 			    npages, entry, dma_addr);
 
 		/* Insert into HW table */
 		build_fail = ppc_md.tce_build(tbl, entry, npages,
-					      vaddr & IOMMU_PAGE_MASK_4K,
+					      vaddr & IOMMU_PAGE_MASK(tbl),
 					      direction, attrs);
 		if(unlikely(build_fail))
 			goto failure;
 
@@ -559,9 +558,9 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		if (s->dma_length != 0) {
 			unsigned long vaddr, npages;
 
-			vaddr = s->dma_address & IOMMU_PAGE_MASK_4K;
+			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
-						 IOMMU_PAGE_SIZE_4K);
+						 IOMMU_PAGE_SIZE(tbl));
 			__iommu_free(tbl, vaddr, npages);
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -592,7 +591,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		if (sg->dma_length == 0)
 			break;
 		npages = iommu_num_pages(dma_handle, sg->dma_length,
-					 IOMMU_PAGE_SIZE_4K);
+					 IOMMU_PAGE_SIZE(tbl));
 		__iommu_free(tbl, dma_handle, npages);
 		sg = sg_next(sg);
 	}
@@ -676,7 +675,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 	set_bit(0, tbl->it_map);
 
 	/* We only split the IOMMU table if we have 1GB or more of space */
-	if ((tbl->it_size << IOMMU_PAGE_SHIFT_4K) >= (1UL * 1024 * 1024 * 1024))
+	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
 		tbl->nr_pools = IOMMU_NR_POOLS;
 	else
 		tbl->nr_pools = 1;
@@ -768,16 +767,16 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 
 	vaddr = page_address(page) + offset;
 	uaddr = (unsigned long)vaddr;
-	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE_4K);
+	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
 
 	if (tbl) {
 		align = 0;
-		if (IOMMU_PAGE_SHIFT_4K < PAGE_SHIFT && size >= PAGE_SIZE &&
+		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
 		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
-			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT_4K;
+			align = PAGE_SHIFT - tbl->it_page_shift;
 
 		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
-					 mask >> IOMMU_PAGE_SHIFT_4K, align,
+					 mask >> tbl->it_page_shift, align,
 					 attrs);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit())  {
@@ -786,7 +785,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 					npages);
 			}
 		} else
-			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK_4K);
+			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
 	}
 
 	return dma_handle;
@@ -801,7 +800,8 @@ void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
 	BUG_ON(direction == DMA_NONE);
 
 	if (tbl) {
-		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE_4K);
+		npages = iommu_num_pages(dma_handle, size,
+					 IOMMU_PAGE_SIZE(tbl));
 		iommu_free(tbl, dma_handle, npages);
 	}
 }
@@ -845,10 +845,10 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	memset(ret, 0, size);
 
 	/* Set up tces to cover the allocated range */
-	nio_pages = size >> IOMMU_PAGE_SHIFT_4K;
-	io_order = get_iommu_order(size);
+	nio_pages = size >> tbl->it_page_shift;
+	io_order = get_iommu_order(size, tbl);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
-			      mask >> IOMMU_PAGE_SHIFT_4K, io_order, NULL);
+			      mask >> tbl->it_page_shift, io_order, NULL);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
 		return NULL;
@@ -864,7 +864,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 		unsigned int nio_pages;
 
 		size = PAGE_ALIGN(size);
-		nio_pages = size >> IOMMU_PAGE_SHIFT_4K;
+		nio_pages = size >> tbl->it_page_shift;
 		iommu_free(tbl, dma_handle, nio_pages);
 		size = PAGE_ALIGN(size);
 		free_pages((unsigned long)vaddr, get_order(size));
@@ -935,10 +935,10 @@ int iommu_tce_clear_param_check(struct iommu_table *tbl,
 	if (tce_value)
 		return -EINVAL;
 
-	if (ioba & ~IOMMU_PAGE_MASK_4K)
+	if (ioba & ~IOMMU_PAGE_MASK(tbl))
 		return -EINVAL;
 
-	ioba >>= IOMMU_PAGE_SHIFT_4K;
+	ioba >>= tbl->it_page_shift;
 	if (ioba < tbl->it_offset)
 		return -EINVAL;
 
@@ -955,13 +955,13 @@ int iommu_tce_put_param_check(struct iommu_table *tbl,
 	if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
 		return -EINVAL;
 
-	if (tce & ~(IOMMU_PAGE_MASK_4K | TCE_PCI_WRITE | TCE_PCI_READ))
+	if (tce & ~(IOMMU_PAGE_MASK(tbl) | TCE_PCI_WRITE | TCE_PCI_READ))
 		return -EINVAL;
 
-	if (ioba & ~IOMMU_PAGE_MASK_4K)
+	if (ioba & ~IOMMU_PAGE_MASK(tbl))
 		return -EINVAL;
 
-	ioba >>= IOMMU_PAGE_SHIFT_4K;
+	ioba >>= tbl->it_page_shift;
 	if (ioba < tbl->it_offset)
 		return -EINVAL;
 
@@ -1037,7 +1037,7 @@ int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
 
 	/* if (unlikely(ret))
 		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
-				__func__, hwaddr, entry << IOMMU_PAGE_SHIFT_4K,
+				__func__, hwaddr, entry << IOMMU_PAGE_SHIFT(tbl),
 				hwaddr, ret); */
 
 	return ret;
@@ -1049,14 +1049,14 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
 {
 	int ret;
 	struct page *page = NULL;
-	unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK_4K & ~PAGE_MASK;
+	unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
 	enum dma_data_direction direction = iommu_tce_direction(tce);
 
 	ret = get_user_pages_fast(tce & PAGE_MASK, 1,
 			direction != DMA_TO_DEVICE, &page);
 	if (unlikely(ret != 1)) {
 		/* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
-				tce, entry << IOMMU_PAGE_SHIFT_4K, ret); */
+				tce, entry << IOMMU_PAGE_SHIFT(tbl), ret); */
 		return -EFAULT;
 	}
 	hwaddr = (unsigned long) page_address(page) + offset;
@@ -1067,7 +1067,7 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
 
 	if (ret < 0)
 		pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
-				__func__, entry << IOMMU_PAGE_SHIFT_4K, tce, ret);
+				__func__, entry << tbl->it_page_shift, tce, ret);
 
 	return ret;
 }
@@ -1127,6 +1127,12 @@ int iommu_add_device(struct device *dev)
 	pr_debug("iommu_tce: adding %s to iommu group %d\n",
 			dev_name(dev), iommu_group_id(tbl->it_group));
 
+	if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
+		pr_err("iommu_tce: unsupported iommu page size.");
+		pr_err("%s has not been added\n", dev_name(dev));
+		return -EINVAL;
+	}
+
 	ret = iommu_group_add_device(tbl->it_group, dev);
 	if (ret < 0)
 		pr_err("iommu_tce: %s has not been added, ret=%d\n",
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 170ac24a0106..826d8bd9e522 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -518,16 +518,18 @@ static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
 					 struct dma_attrs *attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
+	struct iommu_table *tbl;
 	dma_addr_t ret = DMA_ERROR_CODE;
 
-	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K))) {
+	tbl = get_iommu_table_base(dev);
+	if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)))) {
 		atomic_inc(&viodev->cmo.allocs_failed);
 		return ret;
 	}
 
 	ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
 	if (unlikely(dma_mapping_error(dev, ret))) {
-		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
+		vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
 		atomic_inc(&viodev->cmo.allocs_failed);
 	}
 
@@ -540,10 +542,12 @@ static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
 				     struct dma_attrs *attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
+	struct iommu_table *tbl;
 
+	tbl = get_iommu_table_base(dev);
 	dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);
 
-	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE_4K));
+	vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE(tbl)));
 }
 
 static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -551,12 +555,14 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 			struct dma_attrs *attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
+	struct iommu_table *tbl;
 	struct scatterlist *sgl;
 	int ret, count = 0;
 	size_t alloc_size = 0;
 
+	tbl = get_iommu_table_base(dev);
 	for (sgl = sglist; count < nelems; count++, sgl++)
-		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE_4K);
+		alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE(tbl));
 
 	if (vio_cmo_alloc(viodev, alloc_size)) {
 		atomic_inc(&viodev->cmo.allocs_failed);
@@ -572,7 +578,7 @@ static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
 	}
 
 	for (sgl = sglist, count = 0; count < ret; count++, sgl++)
-		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);
+		alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
 	if (alloc_size)
 		vio_cmo_dealloc(viodev, alloc_size);
 
@@ -585,12 +591,14 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
 			struct dma_attrs *attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
+	struct iommu_table *tbl;
 	struct scatterlist *sgl;
 	size_t alloc_size = 0;
 	int count = 0;
 
+	tbl = get_iommu_table_base(dev);
 	for (sgl = sglist; count < nelems; count++, sgl++)
-		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE_4K);
+		alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE(tbl));
 
 	dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);
 
@@ -706,11 +714,14 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
 {
 	struct vio_cmo_dev_entry *dev_ent;
 	struct device *dev = &viodev->dev;
+	struct iommu_table *tbl;
 	struct vio_driver *viodrv = to_vio_driver(dev->driver);
 	unsigned long flags;
 	size_t size;
 	bool dma_capable = false;
 
+	tbl = get_iommu_table_base(dev);
+
 	/* A device requires entitlement if it has a DMA window property */
 	switch (viodev->family) {
 	case VDEVICE:
@@ -737,7 +748,7 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev)
 	}
 
 	viodev->cmo.desired =
-		IOMMU_PAGE_ALIGN_4K(viodrv->get_desired_dma(viodev));
+		IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev), tbl);
 	if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
 		viodev->cmo.desired = VIO_CMO_MIN_ENT;
 	size = VIO_CMO_MIN_ENT;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 569b46423a93..b555ebc57ef5 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -762,8 +762,6 @@ static struct notifier_block tce_iommu_bus_nb = {
 
 static int __init tce_iommu_bus_notifier_init(void)
 {
-	BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE_4K);
-
 	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
 	return 0;
 }
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index f7d7538b6bd9..d04dbaba83dd 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1276,31 +1276,34 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
 {
 	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
 	struct ibmveth_adapter *adapter;
+	struct iommu_table *tbl;
 	unsigned long ret;
 	int i;
 	int rxqentries = 1;
 
+	tbl = get_iommu_table_base(&vdev->dev);
+
 	/* netdev inits at probe time along with the structures we need below*/
 	if (netdev == NULL)
-		return IOMMU_PAGE_ALIGN_4K(IBMVETH_IO_ENTITLEMENT_DEFAULT);
+		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
 
 	adapter = netdev_priv(netdev);
 
 	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
-	ret += IOMMU_PAGE_ALIGN_4K(netdev->mtu);
+	ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
 
 	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
 		/* add the size of the active receive buffers */
 		if (adapter->rx_buff_pool[i].active)
 			ret +=
 			    adapter->rx_buff_pool[i].size *
-			    IOMMU_PAGE_ALIGN_4K(adapter->rx_buff_pool[i].
-			            buff_size);
+			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
+			            buff_size, tbl);
 		rxqentries += adapter->rx_buff_pool[i].size;
 	}
 	/* add the size of the receive queue entries */
-	ret += IOMMU_PAGE_ALIGN_4K(
-		rxqentries * sizeof(struct ibmveth_rx_q_entry));
+	ret += IOMMU_PAGE_ALIGN(
+		rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
 
 	return ret;
 }