Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c | 307
1 file changed, 140 insertions(+), 167 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index c3edcdc08e72..8b51e10b7783 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -33,8 +33,8 @@ | |||
33 | #include <linux/dma-mapping.h> | 33 | #include <linux/dma-mapping.h> |
34 | #include <linux/mempool.h> | 34 | #include <linux/mempool.h> |
35 | #include <linux/timer.h> | 35 | #include <linux/timer.h> |
36 | #include "iova.h" | 36 | #include <linux/iova.h> |
37 | #include "intel-iommu.h" | 37 | #include <linux/intel-iommu.h> |
38 | #include <asm/proto.h> /* force_iommu in this header in x86-64*/ | 38 | #include <asm/proto.h> /* force_iommu in this header in x86-64*/ |
39 | #include <asm/cacheflush.h> | 39 | #include <asm/cacheflush.h> |
40 | #include <asm/iommu.h> | 40 | #include <asm/iommu.h> |
@@ -49,8 +49,6 @@ | |||
49 | 49 | ||
50 | #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 | 50 | #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 |
51 | 51 | ||
52 | #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */ | ||
53 | |||
54 | #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) | 52 | #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) |
55 | 53 | ||
56 | 54 | ||
@@ -58,8 +56,6 @@ static void flush_unmaps_timeout(unsigned long data); | |||
58 | 56 | ||
59 | DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); | 57 | DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); |
60 | 58 | ||
61 | static struct intel_iommu *g_iommus; | ||
62 | |||
63 | #define HIGH_WATER_MARK 250 | 59 | #define HIGH_WATER_MARK 250 |
64 | struct deferred_flush_tables { | 60 | struct deferred_flush_tables { |
65 | int next; | 61 | int next; |
@@ -80,7 +76,7 @@ static long list_size; | |||
80 | 76 | ||
81 | static void domain_remove_dev_info(struct dmar_domain *domain); | 77 | static void domain_remove_dev_info(struct dmar_domain *domain); |
82 | 78 | ||
83 | static int dmar_disabled; | 79 | int dmar_disabled; |
84 | static int __initdata dmar_map_gfx = 1; | 80 | static int __initdata dmar_map_gfx = 1; |
85 | static int dmar_forcedac; | 81 | static int dmar_forcedac; |
86 | static int intel_iommu_strict; | 82 | static int intel_iommu_strict; |
@@ -160,7 +156,7 @@ static inline void *alloc_domain_mem(void) | |||
160 | return iommu_kmem_cache_alloc(iommu_domain_cache); | 156 | return iommu_kmem_cache_alloc(iommu_domain_cache); |
161 | } | 157 | } |
162 | 158 | ||
163 | static inline void free_domain_mem(void *vaddr) | 159 | static void free_domain_mem(void *vaddr) |
164 | { | 160 | { |
165 | kmem_cache_free(iommu_domain_cache, vaddr); | 161 | kmem_cache_free(iommu_domain_cache, vaddr); |
166 | } | 162 | } |
@@ -185,13 +181,6 @@ void free_iova_mem(struct iova *iova) | |||
185 | kmem_cache_free(iommu_iova_cache, iova); | 181 | kmem_cache_free(iommu_iova_cache, iova); |
186 | } | 182 | } |
187 | 183 | ||
188 | static inline void __iommu_flush_cache( | ||
189 | struct intel_iommu *iommu, void *addr, int size) | ||
190 | { | ||
191 | if (!ecap_coherent(iommu->ecap)) | ||
192 | clflush_cache_range(addr, size); | ||
193 | } | ||
194 | |||
195 | /* Gets context entry for a given bus and devfn */ | 184 | /* Gets context entry for a given bus and devfn */ |
196 | static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, | 185 | static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, |
197 | u8 bus, u8 devfn) | 186 | u8 bus, u8 devfn) |
@@ -488,19 +477,6 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) | |||
488 | return 0; | 477 | return 0; |
489 | } | 478 | } |
490 | 479 | ||
491 | #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ | ||
492 | {\ | ||
493 | cycles_t start_time = get_cycles();\ | ||
494 | while (1) {\ | ||
495 | sts = op (iommu->reg + offset);\ | ||
496 | if (cond)\ | ||
497 | break;\ | ||
498 | if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ | ||
499 | panic("DMAR hardware is malfunctioning\n");\ | ||
500 | cpu_relax();\ | ||
501 | }\ | ||
502 | } | ||
503 | |||
504 | static void iommu_set_root_entry(struct intel_iommu *iommu) | 480 | static void iommu_set_root_entry(struct intel_iommu *iommu) |
505 | { | 481 | { |
506 | void *addr; | 482 | void *addr; |
@@ -587,7 +563,7 @@ static int __iommu_flush_context(struct intel_iommu *iommu, | |||
587 | 563 | ||
588 | spin_unlock_irqrestore(&iommu->register_lock, flag); | 564 | spin_unlock_irqrestore(&iommu->register_lock, flag); |
589 | 565 | ||
590 | /* flush context entry will implictly flush write buffer */ | 566 | /* flush context entry will implicitly flush write buffer */ |
591 | return 0; | 567 | return 0; |
592 | } | 568 | } |
593 | 569 | ||
@@ -680,7 +656,7 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, | |||
680 | if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) | 656 | if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) |
681 | pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", | 657 | pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", |
682 | DMA_TLB_IIRG(type), DMA_TLB_IAIG(val)); | 658 | DMA_TLB_IIRG(type), DMA_TLB_IAIG(val)); |
683 | /* flush context entry will implictly flush write buffer */ | 659 | /* flush iotlb entry will implicitly flush write buffer */ |
684 | return 0; | 660 | return 0; |
685 | } | 661 | } |
686 | 662 | ||
@@ -990,6 +966,8 @@ static int iommu_init_domains(struct intel_iommu *iommu) | |||
990 | return -ENOMEM; | 966 | return -ENOMEM; |
991 | } | 967 | } |
992 | 968 | ||
969 | spin_lock_init(&iommu->lock); | ||
970 | |||
993 | /* | 971 | /* |
994 | * if Caching mode is set, then invalid translations are tagged | 972 | * if Caching mode is set, then invalid translations are tagged |
995 | * with domainid 0. Hence we need to pre-allocate it. | 973 | * with domainid 0. Hence we need to pre-allocate it. |
@@ -998,62 +976,15 @@ static int iommu_init_domains(struct intel_iommu *iommu) | |||
998 | set_bit(0, iommu->domain_ids); | 976 | set_bit(0, iommu->domain_ids); |
999 | return 0; | 977 | return 0; |
1000 | } | 978 | } |
1001 | static struct intel_iommu *alloc_iommu(struct intel_iommu *iommu, | ||
1002 | struct dmar_drhd_unit *drhd) | ||
1003 | { | ||
1004 | int ret; | ||
1005 | int map_size; | ||
1006 | u32 ver; | ||
1007 | |||
1008 | iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K); | ||
1009 | if (!iommu->reg) { | ||
1010 | printk(KERN_ERR "IOMMU: can't map the region\n"); | ||
1011 | goto error; | ||
1012 | } | ||
1013 | iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); | ||
1014 | iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); | ||
1015 | |||
1016 | /* the registers might be more than one page */ | ||
1017 | map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), | ||
1018 | cap_max_fault_reg_offset(iommu->cap)); | ||
1019 | map_size = PAGE_ALIGN_4K(map_size); | ||
1020 | if (map_size > PAGE_SIZE_4K) { | ||
1021 | iounmap(iommu->reg); | ||
1022 | iommu->reg = ioremap(drhd->reg_base_addr, map_size); | ||
1023 | if (!iommu->reg) { | ||
1024 | printk(KERN_ERR "IOMMU: can't map the region\n"); | ||
1025 | goto error; | ||
1026 | } | ||
1027 | } | ||
1028 | |||
1029 | ver = readl(iommu->reg + DMAR_VER_REG); | ||
1030 | pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", | ||
1031 | drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), | ||
1032 | iommu->cap, iommu->ecap); | ||
1033 | ret = iommu_init_domains(iommu); | ||
1034 | if (ret) | ||
1035 | goto error_unmap; | ||
1036 | spin_lock_init(&iommu->lock); | ||
1037 | spin_lock_init(&iommu->register_lock); | ||
1038 | 979 | ||
1039 | drhd->iommu = iommu; | ||
1040 | return iommu; | ||
1041 | error_unmap: | ||
1042 | iounmap(iommu->reg); | ||
1043 | error: | ||
1044 | kfree(iommu); | ||
1045 | return NULL; | ||
1046 | } | ||
1047 | 980 | ||
1048 | static void domain_exit(struct dmar_domain *domain); | 981 | static void domain_exit(struct dmar_domain *domain); |
1049 | static void free_iommu(struct intel_iommu *iommu) | 982 | |
983 | void free_dmar_iommu(struct intel_iommu *iommu) | ||
1050 | { | 984 | { |
1051 | struct dmar_domain *domain; | 985 | struct dmar_domain *domain; |
1052 | int i; | 986 | int i; |
1053 | 987 | ||
1054 | if (!iommu) | ||
1055 | return; | ||
1056 | |||
1057 | i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); | 988 | i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); |
1058 | for (; i < cap_ndoms(iommu->cap); ) { | 989 | for (; i < cap_ndoms(iommu->cap); ) { |
1059 | domain = iommu->domains[i]; | 990 | domain = iommu->domains[i]; |
@@ -1078,10 +1009,6 @@ static void free_iommu(struct intel_iommu *iommu) | |||
1078 | 1009 | ||
1079 | /* free context mapping */ | 1010 | /* free context mapping */ |
1080 | free_context_table(iommu); | 1011 | free_context_table(iommu); |
1081 | |||
1082 | if (iommu->reg) | ||
1083 | iounmap(iommu->reg); | ||
1084 | kfree(iommu); | ||
1085 | } | 1012 | } |
1086 | 1013 | ||
1087 | static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) | 1014 | static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) |
@@ -1414,7 +1341,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain) | |||
1414 | * find_domain | 1341 | * find_domain |
1415 | * Note: we use struct pci_dev->dev.archdata.iommu stores the info | 1342 | * Note: we use struct pci_dev->dev.archdata.iommu stores the info |
1416 | */ | 1343 | */ |
1417 | struct dmar_domain * | 1344 | static struct dmar_domain * |
1418 | find_domain(struct pci_dev *pdev) | 1345 | find_domain(struct pci_dev *pdev) |
1419 | { | 1346 | { |
1420 | struct device_domain_info *info; | 1347 | struct device_domain_info *info; |
@@ -1426,37 +1353,6 @@ find_domain(struct pci_dev *pdev) | |||
1426 | return NULL; | 1353 | return NULL; |
1427 | } | 1354 | } |
1428 | 1355 | ||
1429 | static int dmar_pci_device_match(struct pci_dev *devices[], int cnt, | ||
1430 | struct pci_dev *dev) | ||
1431 | { | ||
1432 | int index; | ||
1433 | |||
1434 | while (dev) { | ||
1435 | for (index = 0; index < cnt; index++) | ||
1436 | if (dev == devices[index]) | ||
1437 | return 1; | ||
1438 | |||
1439 | /* Check our parent */ | ||
1440 | dev = dev->bus->self; | ||
1441 | } | ||
1442 | |||
1443 | return 0; | ||
1444 | } | ||
1445 | |||
1446 | static struct dmar_drhd_unit * | ||
1447 | dmar_find_matched_drhd_unit(struct pci_dev *dev) | ||
1448 | { | ||
1449 | struct dmar_drhd_unit *drhd = NULL; | ||
1450 | |||
1451 | list_for_each_entry(drhd, &dmar_drhd_units, list) { | ||
1452 | if (drhd->include_all || dmar_pci_device_match(drhd->devices, | ||
1453 | drhd->devices_cnt, dev)) | ||
1454 | return drhd; | ||
1455 | } | ||
1456 | |||
1457 | return NULL; | ||
1458 | } | ||
1459 | |||
1460 | /* domain is initialized */ | 1356 | /* domain is initialized */ |
1461 | static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) | 1357 | static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) |
1462 | { | 1358 | { |
@@ -1729,8 +1625,6 @@ int __init init_dmars(void) | |||
1729 | * endfor | 1625 | * endfor |
1730 | */ | 1626 | */ |
1731 | for_each_drhd_unit(drhd) { | 1627 | for_each_drhd_unit(drhd) { |
1732 | if (drhd->ignored) | ||
1733 | continue; | ||
1734 | g_num_of_iommus++; | 1628 | g_num_of_iommus++; |
1735 | /* | 1629 | /* |
1736 | * lock not needed as this is only incremented in the single | 1630 | * lock not needed as this is only incremented in the single |
@@ -1739,12 +1633,6 @@ int __init init_dmars(void) | |||
1739 | */ | 1633 | */ |
1740 | } | 1634 | } |
1741 | 1635 | ||
1742 | g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL); | ||
1743 | if (!g_iommus) { | ||
1744 | ret = -ENOMEM; | ||
1745 | goto error; | ||
1746 | } | ||
1747 | |||
1748 | deferred_flush = kzalloc(g_num_of_iommus * | 1636 | deferred_flush = kzalloc(g_num_of_iommus * |
1749 | sizeof(struct deferred_flush_tables), GFP_KERNEL); | 1637 | sizeof(struct deferred_flush_tables), GFP_KERNEL); |
1750 | if (!deferred_flush) { | 1638 | if (!deferred_flush) { |
@@ -1752,16 +1640,15 @@ int __init init_dmars(void) | |||
1752 | goto error; | 1640 | goto error; |
1753 | } | 1641 | } |
1754 | 1642 | ||
1755 | i = 0; | ||
1756 | for_each_drhd_unit(drhd) { | 1643 | for_each_drhd_unit(drhd) { |
1757 | if (drhd->ignored) | 1644 | if (drhd->ignored) |
1758 | continue; | 1645 | continue; |
1759 | iommu = alloc_iommu(&g_iommus[i], drhd); | 1646 | |
1760 | i++; | 1647 | iommu = drhd->iommu; |
1761 | if (!iommu) { | 1648 | |
1762 | ret = -ENOMEM; | 1649 | ret = iommu_init_domains(iommu); |
1650 | if (ret) | ||
1763 | goto error; | 1651 | goto error; |
1764 | } | ||
1765 | 1652 | ||
1766 | /* | 1653 | /* |
1767 | * TBD: | 1654 | * TBD: |
@@ -1845,7 +1732,6 @@ error: | |||
1845 | iommu = drhd->iommu; | 1732 | iommu = drhd->iommu; |
1846 | free_iommu(iommu); | 1733 | free_iommu(iommu); |
1847 | } | 1734 | } |
1848 | kfree(g_iommus); | ||
1849 | return ret; | 1735 | return ret; |
1850 | } | 1736 | } |
1851 | 1737 | ||
@@ -2002,7 +1888,10 @@ static void flush_unmaps(void) | |||
2002 | /* just flush them all */ | 1888 | /* just flush them all */ |
2003 | for (i = 0; i < g_num_of_iommus; i++) { | 1889 | for (i = 0; i < g_num_of_iommus; i++) { |
2004 | if (deferred_flush[i].next) { | 1890 | if (deferred_flush[i].next) { |
2005 | iommu_flush_iotlb_global(&g_iommus[i], 0); | 1891 | struct intel_iommu *iommu = |
1892 | deferred_flush[i].domain[0]->iommu; | ||
1893 | |||
1894 | iommu_flush_iotlb_global(iommu, 0); | ||
2006 | for (j = 0; j < deferred_flush[i].next; j++) { | 1895 | for (j = 0; j < deferred_flush[i].next; j++) { |
2007 | __free_iova(&deferred_flush[i].domain[j]->iovad, | 1896 | __free_iova(&deferred_flush[i].domain[j]->iovad, |
2008 | deferred_flush[i].iova[j]); | 1897 | deferred_flush[i].iova[j]); |
@@ -2032,7 +1921,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) | |||
2032 | if (list_size == HIGH_WATER_MARK) | 1921 | if (list_size == HIGH_WATER_MARK) |
2033 | flush_unmaps(); | 1922 | flush_unmaps(); |
2034 | 1923 | ||
2035 | iommu_id = dom->iommu - g_iommus; | 1924 | iommu_id = dom->iommu->seq_id; |
1925 | |||
2036 | next = deferred_flush[iommu_id].next; | 1926 | next = deferred_flush[iommu_id].next; |
2037 | deferred_flush[iommu_id].domain[next] = dom; | 1927 | deferred_flush[iommu_id].domain[next] = dom; |
2038 | deferred_flush[iommu_id].iova[next] = iova; | 1928 | deferred_flush[iommu_id].iova[next] = iova; |
@@ -2348,38 +2238,6 @@ static void __init iommu_exit_mempool(void) | |||
2348 | 2238 | ||
2349 | } | 2239 | } |
2350 | 2240 | ||
2351 | static int blacklist_iommu(const struct dmi_system_id *id) | ||
2352 | { | ||
2353 | printk(KERN_INFO "%s detected; disabling IOMMU\n", | ||
2354 | id->ident); | ||
2355 | dmar_disabled = 1; | ||
2356 | return 0; | ||
2357 | } | ||
2358 | |||
2359 | static struct dmi_system_id __initdata intel_iommu_dmi_table[] = { | ||
2360 | { /* Some DG33BU BIOS revisions advertised non-existent VT-d */ | ||
2361 | .callback = blacklist_iommu, | ||
2362 | .ident = "Intel DG33BU", | ||
2363 | { DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"), | ||
2364 | DMI_MATCH(DMI_BOARD_NAME, "DG33BU"), | ||
2365 | } | ||
2366 | }, | ||
2367 | { } | ||
2368 | }; | ||
2369 | |||
2370 | |||
2371 | void __init detect_intel_iommu(void) | ||
2372 | { | ||
2373 | if (swiotlb || no_iommu || iommu_detected || dmar_disabled) | ||
2374 | return; | ||
2375 | if (early_dmar_detect()) { | ||
2376 | dmi_check_system(intel_iommu_dmi_table); | ||
2377 | if (dmar_disabled) | ||
2378 | return; | ||
2379 | iommu_detected = 1; | ||
2380 | } | ||
2381 | } | ||
2382 | |||
2383 | static void __init init_no_remapping_devices(void) | 2241 | static void __init init_no_remapping_devices(void) |
2384 | { | 2242 | { |
2385 | struct dmar_drhd_unit *drhd; | 2243 | struct dmar_drhd_unit *drhd; |
@@ -2426,12 +2284,19 @@ int __init intel_iommu_init(void) | |||
2426 | { | 2284 | { |
2427 | int ret = 0; | 2285 | int ret = 0; |
2428 | 2286 | ||
2429 | if (no_iommu || swiotlb || dmar_disabled) | ||
2430 | return -ENODEV; | ||
2431 | |||
2432 | if (dmar_table_init()) | 2287 | if (dmar_table_init()) |
2433 | return -ENODEV; | 2288 | return -ENODEV; |
2434 | 2289 | ||
2290 | if (dmar_dev_scope_init()) | ||
2291 | return -ENODEV; | ||
2292 | |||
2293 | /* | ||
2294 | * Check the need for DMA-remapping initialization now. | ||
2295 | * Above initialization will also be used by Interrupt-remapping. | ||
2296 | */ | ||
2297 | if (no_iommu || swiotlb || dmar_disabled) | ||
2298 | return -ENODEV; | ||
2299 | |||
2435 | iommu_init_mempool(); | 2300 | iommu_init_mempool(); |
2436 | dmar_init_reserved_ranges(); | 2301 | dmar_init_reserved_ranges(); |
2437 | 2302 | ||
@@ -2453,3 +2318,111 @@ int __init intel_iommu_init(void) | |||
2453 | return 0; | 2318 | return 0; |
2454 | } | 2319 | } |
2455 | 2320 | ||
2321 | void intel_iommu_domain_exit(struct dmar_domain *domain) | ||
2322 | { | ||
2323 | u64 end; | ||
2324 | |||
2325 | /* Domain 0 is reserved, so dont process it */ | ||
2326 | if (!domain) | ||
2327 | return; | ||
2328 | |||
2329 | end = DOMAIN_MAX_ADDR(domain->gaw); | ||
2330 | end = end & (~PAGE_MASK_4K); | ||
2331 | |||
2332 | /* clear ptes */ | ||
2333 | dma_pte_clear_range(domain, 0, end); | ||
2334 | |||
2335 | /* free page tables */ | ||
2336 | dma_pte_free_pagetable(domain, 0, end); | ||
2337 | |||
2338 | iommu_free_domain(domain); | ||
2339 | free_domain_mem(domain); | ||
2340 | } | ||
2341 | EXPORT_SYMBOL_GPL(intel_iommu_domain_exit); | ||
2342 | |||
2343 | struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev) | ||
2344 | { | ||
2345 | struct dmar_drhd_unit *drhd; | ||
2346 | struct dmar_domain *domain; | ||
2347 | struct intel_iommu *iommu; | ||
2348 | |||
2349 | drhd = dmar_find_matched_drhd_unit(pdev); | ||
2350 | if (!drhd) { | ||
2351 | printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n"); | ||
2352 | return NULL; | ||
2353 | } | ||
2354 | |||
2355 | iommu = drhd->iommu; | ||
2356 | if (!iommu) { | ||
2357 | printk(KERN_ERR | ||
2358 | "intel_iommu_domain_alloc: iommu == NULL\n"); | ||
2359 | return NULL; | ||
2360 | } | ||
2361 | domain = iommu_alloc_domain(iommu); | ||
2362 | if (!domain) { | ||
2363 | printk(KERN_ERR | ||
2364 | "intel_iommu_domain_alloc: domain == NULL\n"); | ||
2365 | return NULL; | ||
2366 | } | ||
2367 | if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | ||
2368 | printk(KERN_ERR | ||
2369 | "intel_iommu_domain_alloc: domain_init() failed\n"); | ||
2370 | intel_iommu_domain_exit(domain); | ||
2371 | return NULL; | ||
2372 | } | ||
2373 | return domain; | ||
2374 | } | ||
2375 | EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc); | ||
2376 | |||
2377 | int intel_iommu_context_mapping( | ||
2378 | struct dmar_domain *domain, struct pci_dev *pdev) | ||
2379 | { | ||
2380 | int rc; | ||
2381 | rc = domain_context_mapping(domain, pdev); | ||
2382 | return rc; | ||
2383 | } | ||
2384 | EXPORT_SYMBOL_GPL(intel_iommu_context_mapping); | ||
2385 | |||
2386 | int intel_iommu_page_mapping( | ||
2387 | struct dmar_domain *domain, dma_addr_t iova, | ||
2388 | u64 hpa, size_t size, int prot) | ||
2389 | { | ||
2390 | int rc; | ||
2391 | rc = domain_page_mapping(domain, iova, hpa, size, prot); | ||
2392 | return rc; | ||
2393 | } | ||
2394 | EXPORT_SYMBOL_GPL(intel_iommu_page_mapping); | ||
2395 | |||
2396 | void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn) | ||
2397 | { | ||
2398 | detach_domain_for_dev(domain, bus, devfn); | ||
2399 | } | ||
2400 | EXPORT_SYMBOL_GPL(intel_iommu_detach_dev); | ||
2401 | |||
2402 | struct dmar_domain * | ||
2403 | intel_iommu_find_domain(struct pci_dev *pdev) | ||
2404 | { | ||
2405 | return find_domain(pdev); | ||
2406 | } | ||
2407 | EXPORT_SYMBOL_GPL(intel_iommu_find_domain); | ||
2408 | |||
2409 | int intel_iommu_found(void) | ||
2410 | { | ||
2411 | return g_num_of_iommus; | ||
2412 | } | ||
2413 | EXPORT_SYMBOL_GPL(intel_iommu_found); | ||
2414 | |||
2415 | u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova) | ||
2416 | { | ||
2417 | struct dma_pte *pte; | ||
2418 | u64 pfn; | ||
2419 | |||
2420 | pfn = 0; | ||
2421 | pte = addr_to_dma_pte(domain, iova); | ||
2422 | |||
2423 | if (pte) | ||
2424 | pfn = dma_pte_addr(*pte); | ||
2425 | |||
2426 | return pfn >> PAGE_SHIFT_4K; | ||
2427 | } | ||
2428 | EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn); | ||
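
The hunk above exports a small domain-management API (intel_iommu_domain_alloc, intel_iommu_context_mapping, intel_iommu_page_mapping, intel_iommu_detach_dev, intel_iommu_domain_exit). A minimal sketch of how an in-kernel consumer might drive it for one device follows; the helper name and error handling are hypothetical, and the DMA_PTE_READ/DMA_PTE_WRITE protection flags are assumed to come from <linux/intel-iommu.h>. It is not taken from this commit.

	/*
	 * Minimal sketch (not from this commit): allocate a domain for a PCI
	 * device, attach the device's context entry to it, and map one page.
	 * example_map_one_page() is a hypothetical caller; DMA_PTE_READ and
	 * DMA_PTE_WRITE are assumed flags from <linux/intel-iommu.h>.
	 */
	#include <linux/pci.h>
	#include <linux/intel-iommu.h>

	static int example_map_one_page(struct pci_dev *pdev, dma_addr_t iova, u64 hpa)
	{
		struct dmar_domain *domain;
		int ret;

		domain = intel_iommu_domain_alloc(pdev);	/* allocate and init a domain */
		if (!domain)
			return -ENOMEM;

		ret = intel_iommu_context_mapping(domain, pdev);	/* bind the device to it */
		if (ret)
			goto out_exit;

		ret = intel_iommu_page_mapping(domain, iova, hpa, PAGE_SIZE,
					       DMA_PTE_READ | DMA_PTE_WRITE);
		if (ret)
			goto out_detach;

		return 0;

	out_detach:
		intel_iommu_detach_dev(domain, pdev->bus->number, pdev->devfn);
	out_exit:
		intel_iommu_domain_exit(domain);	/* clears PTEs and frees the domain */
		return ret;
	}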