Diffstat (limited to 'arch/powerpc/kernel/iommu.c')
-rw-r--r--  arch/powerpc/kernel/iommu.c  157
1 file changed, 73 insertions(+), 84 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 572bb5b95f35..88e3ec6e1d96 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -251,14 +251,13 @@ again:
 
 	if (dev)
 		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				      1 << IOMMU_PAGE_SHIFT);
+				      1 << tbl->it_page_shift);
 	else
-		boundary_size = ALIGN(1UL << 32, 1 << IOMMU_PAGE_SHIFT);
+		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
 	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
 
-	n = iommu_area_alloc(tbl->it_map, limit, start, npages,
-			     tbl->it_offset, boundary_size >> IOMMU_PAGE_SHIFT,
-			     align_mask);
+	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
+			     boundary_size >> tbl->it_page_shift, align_mask);
 	if (n == -1) {
 		if (likely(pass == 0)) {
 			/* First try the pool from the start */
@@ -320,12 +319,12 @@ static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 		return DMA_ERROR_CODE;
 
 	entry += tbl->it_offset;	/* Offset into real TCE table */
-	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */
+	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
 
 	/* Put the TCEs in the HW table */
 	build_fail = ppc_md.tce_build(tbl, entry, npages,
-			(unsigned long)page & IOMMU_PAGE_MASK,
-			direction, attrs);
+			(unsigned long)page &
+			IOMMU_PAGE_MASK(tbl), direction, attrs);
 
 	/* ppc_md.tce_build() only returns non-zero for transient errors.
 	 * Clean up the table bitmap in this case and return
@@ -352,7 +351,7 @@ static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
 {
 	unsigned long entry, free_entry;
 
-	entry = dma_addr >> IOMMU_PAGE_SHIFT;
+	entry = dma_addr >> tbl->it_page_shift;
 	free_entry = entry - tbl->it_offset;
 
 	if (((free_entry + npages) > tbl->it_size) ||
@@ -401,7 +400,7 @@ static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 	unsigned long flags;
 	struct iommu_pool *pool;
 
-	entry = dma_addr >> IOMMU_PAGE_SHIFT;
+	entry = dma_addr >> tbl->it_page_shift;
 	free_entry = entry - tbl->it_offset;
 
 	pool = get_pool(tbl, free_entry);
@@ -468,13 +467,13 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		}
 		/* Allocate iommu entries for that segment */
 		vaddr = (unsigned long) sg_virt(s);
-		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE);
+		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
 		align = 0;
-		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
+		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
 		    (vaddr & ~PAGE_MASK) == 0)
-			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+			align = PAGE_SHIFT - tbl->it_page_shift;
 		entry = iommu_range_alloc(dev, tbl, npages, &handle,
-					  mask >> IOMMU_PAGE_SHIFT, align);
+					  mask >> tbl->it_page_shift, align);
 
 		DBG(" - vaddr: %lx, size: %lx\n", vaddr, slen);
 
@@ -489,16 +488,16 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 
 		/* Convert entry to a dma_addr_t */
 		entry += tbl->it_offset;
-		dma_addr = entry << IOMMU_PAGE_SHIFT;
-		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);
+		dma_addr = entry << tbl->it_page_shift;
+		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
 
 		DBG(" - %lu pages, entry: %lx, dma_addr: %lx\n",
 			    npages, entry, dma_addr);
 
 		/* Insert into HW table */
 		build_fail = ppc_md.tce_build(tbl, entry, npages,
-					      vaddr & IOMMU_PAGE_MASK,
-					      direction, attrs);
+					      vaddr & IOMMU_PAGE_MASK(tbl),
+					      direction, attrs);
 		if(unlikely(build_fail))
 			goto failure;
 
@@ -559,9 +558,9 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		if (s->dma_length != 0) {
 			unsigned long vaddr, npages;
 
-			vaddr = s->dma_address & IOMMU_PAGE_MASK;
+			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
 			npages = iommu_num_pages(s->dma_address, s->dma_length,
-						 IOMMU_PAGE_SIZE);
+						 IOMMU_PAGE_SIZE(tbl));
 			__iommu_free(tbl, vaddr, npages);
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -592,7 +591,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 		if (sg->dma_length == 0)
 			break;
 		npages = iommu_num_pages(dma_handle, sg->dma_length,
-					 IOMMU_PAGE_SIZE);
+					 IOMMU_PAGE_SIZE(tbl));
 		__iommu_free(tbl, dma_handle, npages);
 		sg = sg_next(sg);
 	}
@@ -676,7 +675,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 		set_bit(0, tbl->it_map);
 
 	/* We only split the IOMMU table if we have 1GB or more of space */
-	if ((tbl->it_size << IOMMU_PAGE_SHIFT) >= (1UL * 1024 * 1024 * 1024))
+	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
 		tbl->nr_pools = IOMMU_NR_POOLS;
 	else
 		tbl->nr_pools = 1;
@@ -768,16 +767,16 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 
 	vaddr = page_address(page) + offset;
 	uaddr = (unsigned long)vaddr;
-	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE);
+	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
 
 	if (tbl) {
 		align = 0;
-		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && size >= PAGE_SIZE &&
+		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
 		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
-			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
+			align = PAGE_SHIFT - tbl->it_page_shift;
 
 		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
-					 mask >> IOMMU_PAGE_SHIFT, align,
+					 mask >> tbl->it_page_shift, align,
 					 attrs);
 		if (dma_handle == DMA_ERROR_CODE) {
 			if (printk_ratelimit()) {
@@ -786,7 +785,7 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 				       npages);
 			}
 		} else
-			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
+			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
 	}
 
 	return dma_handle;
@@ -801,7 +800,8 @@ void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
 	BUG_ON(direction == DMA_NONE);
 
 	if (tbl) {
-		npages = iommu_num_pages(dma_handle, size, IOMMU_PAGE_SIZE);
+		npages = iommu_num_pages(dma_handle, size,
+					 IOMMU_PAGE_SIZE(tbl));
 		iommu_free(tbl, dma_handle, npages);
 	}
 }
@@ -845,10 +845,10 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	memset(ret, 0, size);
 
 	/* Set up tces to cover the allocated range */
-	nio_pages = size >> IOMMU_PAGE_SHIFT;
-	io_order = get_iommu_order(size);
+	nio_pages = size >> tbl->it_page_shift;
+	io_order = get_iommu_order(size, tbl);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
-			      mask >> IOMMU_PAGE_SHIFT, io_order, NULL);
+			      mask >> tbl->it_page_shift, io_order, NULL);
 	if (mapping == DMA_ERROR_CODE) {
 		free_pages((unsigned long)ret, order);
 		return NULL;
@@ -864,7 +864,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 		unsigned int nio_pages;
 
 		size = PAGE_ALIGN(size);
-		nio_pages = size >> IOMMU_PAGE_SHIFT;
+		nio_pages = size >> tbl->it_page_shift;
 		iommu_free(tbl, dma_handle, nio_pages);
 		size = PAGE_ALIGN(size);
 		free_pages((unsigned long)vaddr, get_order(size));
@@ -935,10 +935,10 @@ int iommu_tce_clear_param_check(struct iommu_table *tbl,
 	if (tce_value)
 		return -EINVAL;
 
-	if (ioba & ~IOMMU_PAGE_MASK)
+	if (ioba & ~IOMMU_PAGE_MASK(tbl))
 		return -EINVAL;
 
-	ioba >>= IOMMU_PAGE_SHIFT;
+	ioba >>= tbl->it_page_shift;
 	if (ioba < tbl->it_offset)
 		return -EINVAL;
 
@@ -955,13 +955,13 @@ int iommu_tce_put_param_check(struct iommu_table *tbl,
 	if (!(tce & (TCE_PCI_WRITE | TCE_PCI_READ)))
 		return -EINVAL;
 
-	if (tce & ~(IOMMU_PAGE_MASK | TCE_PCI_WRITE | TCE_PCI_READ))
+	if (tce & ~(IOMMU_PAGE_MASK(tbl) | TCE_PCI_WRITE | TCE_PCI_READ))
 		return -EINVAL;
 
-	if (ioba & ~IOMMU_PAGE_MASK)
+	if (ioba & ~IOMMU_PAGE_MASK(tbl))
 		return -EINVAL;
 
-	ioba >>= IOMMU_PAGE_SHIFT;
+	ioba >>= tbl->it_page_shift;
 	if (ioba < tbl->it_offset)
 		return -EINVAL;
 
@@ -1037,7 +1037,7 @@ int iommu_tce_build(struct iommu_table *tbl, unsigned long entry,
 
 	/* if (unlikely(ret))
 		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
-			__func__, hwaddr, entry << IOMMU_PAGE_SHIFT,
+			__func__, hwaddr, entry << IOMMU_PAGE_SHIFT(tbl),
 			hwaddr, ret); */
 
 	return ret;
@@ -1049,14 +1049,14 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
 {
 	int ret;
 	struct page *page = NULL;
-	unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK & ~PAGE_MASK;
+	unsigned long hwaddr, offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
 	enum dma_data_direction direction = iommu_tce_direction(tce);
 
 	ret = get_user_pages_fast(tce & PAGE_MASK, 1,
 			direction != DMA_TO_DEVICE, &page);
 	if (unlikely(ret != 1)) {
 		/* pr_err("iommu_tce: get_user_pages_fast failed tce=%lx ioba=%lx ret=%d\n",
-				tce, entry << IOMMU_PAGE_SHIFT, ret); */
+				tce, entry << IOMMU_PAGE_SHIFT(tbl), ret); */
 		return -EFAULT;
 	}
 	hwaddr = (unsigned long) page_address(page) + offset;
@@ -1067,7 +1067,7 @@ int iommu_put_tce_user_mode(struct iommu_table *tbl, unsigned long entry,
 
 	if (ret < 0)
 		pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%d\n",
-			__func__, entry << IOMMU_PAGE_SHIFT, tce, ret);
+			__func__, entry << tbl->it_page_shift, tce, ret);
 
 	return ret;
 }
@@ -1088,6 +1088,14 @@ int iommu_take_ownership(struct iommu_table *tbl)
 	memset(tbl->it_map, 0xff, sz);
 	iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
 
+	/*
+	 * Disable iommu bypass, otherwise the user can DMA to all of
+	 * our physical memory via the bypass window instead of just
+	 * the pages that has been explicitly mapped into the iommu
+	 */
+	if (tbl->set_bypass)
+		tbl->set_bypass(tbl, false);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(iommu_take_ownership);
@@ -1102,10 +1110,14 @@ void iommu_release_ownership(struct iommu_table *tbl)
 	/* Restore bit#0 set by iommu_init_table() */
 	if (tbl->it_offset == 0)
 		set_bit(0, tbl->it_map);
+
+	/* The kernel owns the device now, we can restore the iommu bypass */
+	if (tbl->set_bypass)
+		tbl->set_bypass(tbl, true);
 }
 EXPORT_SYMBOL_GPL(iommu_release_ownership);
 
-static int iommu_add_device(struct device *dev)
+int iommu_add_device(struct device *dev)
 {
 	struct iommu_table *tbl;
 	int ret = 0;
@@ -1127,6 +1139,12 @@ static int iommu_add_device(struct device *dev)
 	pr_debug("iommu_tce: adding %s to iommu group %d\n",
 		 dev_name(dev), iommu_group_id(tbl->it_group));
 
+	if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
+		pr_err("iommu_tce: unsupported iommu page size.");
+		pr_err("%s has not been added\n", dev_name(dev));
+		return -EINVAL;
+	}
+
 	ret = iommu_group_add_device(tbl->it_group, dev);
 	if (ret < 0)
 		pr_err("iommu_tce: %s has not been added, ret=%d\n",
@@ -1134,52 +1152,23 @@ static int iommu_add_device(struct device *dev)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(iommu_add_device);
 
-static void iommu_del_device(struct device *dev)
-{
-	iommu_group_remove_device(dev);
-}
-
-static int iommu_bus_notifier(struct notifier_block *nb,
-		unsigned long action, void *data)
+void iommu_del_device(struct device *dev)
 {
-	struct device *dev = data;
-
-	switch (action) {
-	case BUS_NOTIFY_ADD_DEVICE:
-		return iommu_add_device(dev);
-	case BUS_NOTIFY_DEL_DEVICE:
-		iommu_del_device(dev);
-		return 0;
-	default:
-		return 0;
+	/*
+	 * Some devices might not have IOMMU table and group
+	 * and we needn't detach them from the associated
+	 * IOMMU groups
+	 */
+	if (!dev->iommu_group) {
+		pr_debug("iommu_tce: skipping device %s with no tbl\n",
+			dev_name(dev));
+		return;
 	}
-}
-
-static struct notifier_block tce_iommu_bus_nb = {
-	.notifier_call = iommu_bus_notifier,
-};
-
-static int __init tce_iommu_init(void)
-{
-	struct pci_dev *pdev = NULL;
-
-	BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE);
-
-	for_each_pci_dev(pdev)
-		iommu_add_device(&pdev->dev);
-
-	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
-	return 0;
-}
-
-subsys_initcall_sync(tce_iommu_init);
-
-#else
 
-void iommu_register_group(struct iommu_table *tbl,
-		int pci_domain_number, unsigned long pe_num)
-{
+	iommu_group_remove_device(dev);
 }
+EXPORT_SYMBOL_GPL(iommu_del_device);
 
 #endif /* CONFIG_IOMMU_API */
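
Note: the hunks above only show the .c side of the conversion; the per-table helpers they call (IOMMU_PAGE_SIZE(tbl), IOMMU_PAGE_MASK(tbl), the extra tbl argument to get_iommu_order()) come from arch/powerpc/include/asm/iommu.h and are not part of this diff. The standalone sketch below illustrates, under the assumption that those macros simply expand the table's it_page_shift, how a DMA address now decomposes into a TCE slot and an in-page offset exactly as iommu_free_check()/__iommu_free() do after this change; the struct layout, macro bodies, and example values here are illustrative, not the kernel's definitions.

```c
/*
 * Standalone sketch (not kernel code) of the per-table page-size
 * arithmetic the patch switches to.  Names and values are for
 * illustration only.
 */
#include <stdio.h>

struct iommu_table {
	unsigned long it_offset;     /* first TCE index covered by this table */
	unsigned long it_page_shift; /* log2 of the IOMMU page size */
};

/* Assumed expansions of the new per-table macros. */
#define IOMMU_PAGE_SIZE(tbl) (1UL << (tbl)->it_page_shift)
#define IOMMU_PAGE_MASK(tbl) (~(IOMMU_PAGE_SIZE(tbl) - 1))

int main(void)
{
	/* A hypothetical 64K-page TCE table starting at IOMMU page index 0x800. */
	struct iommu_table tbl = { .it_offset = 0x800, .it_page_shift = 16 };
	unsigned long dma_addr = 0x08012345;

	/* Same decomposition the patched free/check paths perform. */
	unsigned long entry = dma_addr >> tbl.it_page_shift;
	unsigned long free_entry = entry - tbl.it_offset;
	unsigned long offset = dma_addr & ~IOMMU_PAGE_MASK(&tbl);

	printf("page size %lu, entry %#lx, table slot %#lx, offset %#lx\n",
	       IOMMU_PAGE_SIZE(&tbl), entry, free_entry, offset);
	return 0;
}
```

With it_page_shift = 12 the same arithmetic reproduces the old hardcoded 4K behaviour, which is why the conversion is mechanical everywhere except the new PAGE_SIZE < IOMMU_PAGE_SIZE(tbl) guard in iommu_add_device().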