Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--  drivers/pci/intel-iommu.c  185
1 file changed, 25 insertions(+), 160 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 6c4c1c3c50ee..389fdd6f4a9f 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -49,8 +49,6 @@
 
 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
 
-#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */
-
 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
 
 
@@ -58,8 +56,6 @@ static void flush_unmaps_timeout(unsigned long data);
 
 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
 
-static struct intel_iommu *g_iommus;
-
 #define HIGH_WATER_MARK 250
 struct deferred_flush_tables {
 	int next;
@@ -185,13 +181,6 @@ void free_iova_mem(struct iova *iova)
 	kmem_cache_free(iommu_iova_cache, iova);
 }
 
-static inline void __iommu_flush_cache(
-	struct intel_iommu *iommu, void *addr, int size)
-{
-	if (!ecap_coherent(iommu->ecap))
-		clflush_cache_range(addr, size);
-}
-
 /* Gets context entry for a given bus and devfn */
 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
 		u8 bus, u8 devfn)
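
__iommu_flush_cache() is removed here with no replacement in this file, yet the page-table and context-entry writers that call it remain, so it has presumably moved to a shared header as part of this reorganization. A minimal sketch of that shared helper, assuming it keeps the same shape (the destination header is an assumption):

	/* Force IOMMU-visible structures out of the CPU cache when the
	 * hardware does not snoop coherently (ECAP coherency bit clear). */
	static inline void __iommu_flush_cache(
		struct intel_iommu *iommu, void *addr, int size)
	{
		if (!ecap_coherent(iommu->ecap))
			clflush_cache_range(addr, size);
	}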
@@ -488,19 +477,6 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
 	return 0;
 }
 
-#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
-{\
-	cycles_t start_time = get_cycles();\
-	while (1) {\
-		sts = op (iommu->reg + offset);\
-		if (cond)\
-			break;\
-		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
-			panic("DMAR hardware is malfunctioning\n");\
-		cpu_relax();\
-	}\
-}
-
 static void iommu_set_root_entry(struct intel_iommu *iommu)
 {
 	void *addr;
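
IOMMU_WAIT_OP() and the DMAR_OPERATION_TIMEOUT it spins against (removed in the first hunk) are deleted together, while callers such as iommu_set_root_entry() stay in this file, so the macro presumably moves to a shared header as well. For reference, the typical call pattern polls the global status register until the hardware acknowledges a command:

	u32 sts;

	/* wait until hardware reports the root-table pointer as set;
	 * DMA_GSTS_RTPS is the root table pointer status bit */
	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG,
		readl, (sts & DMA_GSTS_RTPS), sts);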
@@ -990,6 +966,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 		return -ENOMEM;
 	}
 
+	spin_lock_init(&iommu->lock);
+
 	/*
 	 * if Caching mode is set, then invalid translations are tagged
 	 * with domainid 0. Hence we need to pre-allocate it.
@@ -998,62 +976,15 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	set_bit(0, iommu->domain_ids);
 	return 0;
 }
-static struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
-					struct dmar_drhd_unit *drhd)
-{
-	int ret;
-	int map_size;
-	u32 ver;
-
-	iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
-	if (!iommu->reg) {
-		printk(KERN_ERR "IOMMU: can't map the region\n");
-		goto error;
-	}
-	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
-	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
-
-	/* the registers might be more than one page */
-	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
-		cap_max_fault_reg_offset(iommu->cap));
-	map_size = PAGE_ALIGN_4K(map_size);
-	if (map_size > PAGE_SIZE_4K) {
-		iounmap(iommu->reg);
-		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
-		if (!iommu->reg) {
-			printk(KERN_ERR "IOMMU: can't map the region\n");
-			goto error;
-		}
-	}
-
-	ver = readl(iommu->reg + DMAR_VER_REG);
-	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
-		drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
-		iommu->cap, iommu->ecap);
-	ret = iommu_init_domains(iommu);
-	if (ret)
-		goto error_unmap;
-	spin_lock_init(&iommu->lock);
-	spin_lock_init(&iommu->register_lock);
 
-	drhd->iommu = iommu;
-	return iommu;
-error_unmap:
-	iounmap(iommu->reg);
-error:
-	kfree(iommu);
-	return NULL;
-}
 
 static void domain_exit(struct dmar_domain *domain);
-static void free_iommu(struct intel_iommu *iommu)
+
+void free_dmar_iommu(struct intel_iommu *iommu)
 {
 	struct dmar_domain *domain;
 	int i;
 
-	if (!iommu)
-		return;
-
 	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
 	for (; i < cap_ndoms(iommu->cap); ) {
 		domain = iommu->domains[i];
@@ -1078,10 +1009,6 @@ static void free_iommu(struct intel_iommu *iommu)
 
 	/* free context mapping */
 	free_context_table(iommu);
-
-	if (iommu->reg)
-		iounmap(iommu->reg);
-	kfree(iommu);
 }
 
 static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
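
free_iommu() is split in these two hunks: the domain and context-table teardown that depends on DMA-remapping internals stays behind as the non-static free_dmar_iommu(), while the NULL check, register unmap, and kfree() disappear from this file. Presumably the generic half lands next to the relocated alloc_iommu(); a sketch of how the two halves would compose there, under that assumption:

	/* sketch: generic teardown, calling back into the
	 * DMA-remapping-specific half that remains in intel-iommu.c */
	void free_iommu(struct intel_iommu *iommu)
	{
		if (!iommu)
			return;

		free_dmar_iommu(iommu);		/* domain ids, context tables */

		if (iommu->reg)
			iounmap(iommu->reg);
		kfree(iommu);
	}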
@@ -1426,37 +1353,6 @@ find_domain(struct pci_dev *pdev)
 	return NULL;
 }
 
-static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
-	struct pci_dev *dev)
-{
-	int index;
-
-	while (dev) {
-		for (index = 0; index < cnt; index++)
-			if (dev == devices[index])
-				return 1;
-
-		/* Check our parent */
-		dev = dev->bus->self;
-	}
-
-	return 0;
-}
-
-static struct dmar_drhd_unit *
-dmar_find_matched_drhd_unit(struct pci_dev *dev)
-{
-	struct dmar_drhd_unit *drhd = NULL;
-
-	list_for_each_entry(drhd, &dmar_drhd_units, list) {
-		if (drhd->include_all || dmar_pci_device_match(drhd->devices,
-						drhd->devices_cnt, dev))
-			return drhd;
-	}
-
-	return NULL;
-}
-
 /* domain is initialized */
 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 {
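
dmar_pci_device_match() walks dev->bus->self up the PCI parent chain, so a device behind a bridge matches a DRHD unit that only lists the bridge. get_domain_for_dev() below still performs this lookup, so the pair presumably moves to the shared DMAR code and is exported roughly as follows (the header location is an assumption):

	/* assumed declaration, e.g. in include/linux/dmar.h */
	extern struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev);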
@@ -1729,8 +1625,6 @@ int __init init_dmars(void)
 	 * endfor
 	 */
 	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
 		g_num_of_iommus++;
 		/*
 		 * lock not needed as this is only incremented in the single
@@ -1739,12 +1633,6 @@ int __init init_dmars(void)
 		 */
 	}
 
-	g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL);
-	if (!g_iommus) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
 	deferred_flush = kzalloc(g_num_of_iommus *
 		sizeof(struct deferred_flush_tables), GFP_KERNEL);
 	if (!deferred_flush) {
@@ -1752,16 +1640,15 @@ int __init init_dmars(void)
 		goto error;
 	}
 
-	i = 0;
 	for_each_drhd_unit(drhd) {
 		if (drhd->ignored)
 			continue;
-		iommu = alloc_iommu(&g_iommus[i], drhd);
-		i++;
-		if (!iommu) {
-			ret = -ENOMEM;
+
+		iommu = drhd->iommu;
+
+		ret = iommu_init_domains(iommu);
+		if (ret)
 			goto error;
-		}
 
 		/*
 		 * TBD:
@@ -1845,7 +1732,6 @@ error:
 		iommu = drhd->iommu;
 		free_iommu(iommu);
 	}
-	kfree(g_iommus);
 	return ret;
 }
 
@@ -2002,7 +1888,10 @@ static void flush_unmaps(void)
 	/* just flush them all */
 	for (i = 0; i < g_num_of_iommus; i++) {
 		if (deferred_flush[i].next) {
-			iommu_flush_iotlb_global(&g_iommus[i], 0);
+			struct intel_iommu *iommu =
+				deferred_flush[i].domain[0]->iommu;
+
+			iommu_flush_iotlb_global(iommu, 0);
 			for (j = 0; j < deferred_flush[i].next; j++) {
 				__free_iova(&deferred_flush[i].domain[j]->iovad,
 					deferred_flush[i].iova[j]);
@@ -2032,7 +1921,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
 	if (list_size == HIGH_WATER_MARK)
 		flush_unmaps();
 
-	iommu_id = dom->iommu - g_iommus;
+	iommu_id = dom->iommu->seq_id;
+
 	next = deferred_flush[iommu_id].next;
 	deferred_flush[iommu_id].domain[next] = dom;
 	deferred_flush[iommu_id].iova[next] = iova;
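
Both flush_unmaps() and add_unmap() previously recovered an IOMMU's index by pointer arithmetic against the g_iommus array; with that array gone, each unit carries a dense seq_id of its own instead. A sketch of the assumed assignment, presumably done once per DRHD unit at discovery time:

	/* sketch: a stable, dense per-unit index lets per-IOMMU state
	 * such as deferred_flush[] be sized by g_num_of_iommus */
	static int iommu_allocated;

	iommu->seq_id = iommu_allocated++;	/* in per-unit setup */

Because deferred_flush[] is indexed by seq_id, every entry in slot i belongs to one IOMMU, which is why flush_unmaps() above can safely recover it from deferred_flush[i].domain[0]->iommu.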
@@ -2348,38 +2238,6 @@ static void __init iommu_exit_mempool(void)
 
 }
 
-static int blacklist_iommu(const struct dmi_system_id *id)
-{
-	printk(KERN_INFO "%s detected; disabling IOMMU\n",
-		id->ident);
-	dmar_disabled = 1;
-	return 0;
-}
-
-static struct dmi_system_id __initdata intel_iommu_dmi_table[] = {
-	{	/* Some DG33BU BIOS revisions advertised non-existent VT-d */
-		.callback = blacklist_iommu,
-		.ident = "Intel DG33BU",
-		{	DMI_MATCH(DMI_BOARD_VENDOR, "Intel Corporation"),
-			DMI_MATCH(DMI_BOARD_NAME, "DG33BU"),
-		}
-	},
-	{ }
-};
-
-
-void __init detect_intel_iommu(void)
-{
-	if (swiotlb || no_iommu || iommu_detected || dmar_disabled)
-		return;
-	if (early_dmar_detect()) {
-		dmi_check_system(intel_iommu_dmi_table);
-		if (dmar_disabled)
-			return;
-		iommu_detected = 1;
-	}
-}
-
 static void __init init_no_remapping_devices(void)
 {
 	struct dmar_drhd_unit *drhd;
@@ -2426,12 +2284,19 @@ int __init intel_iommu_init(void)
 {
 	int ret = 0;
 
-	if (no_iommu || swiotlb || dmar_disabled)
-		return -ENODEV;
-
 	if (dmar_table_init())
 		return -ENODEV;
 
+	if (dmar_dev_scope_init())
+		return -ENODEV;
+
+	/*
+	 * Check the need for DMA-remapping initialization now.
+	 * Above initialization will also be used by Interrupt-remapping.
+	 */
+	if (no_iommu || swiotlb || dmar_disabled)
+		return -ENODEV;
+
 	iommu_init_mempool();
 	dmar_init_reserved_ranges();
 
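
The reshuffled intel_iommu_init() also explains the init_dmars() hunks above: dmar_table_init() and the new dmar_dev_scope_init() now run before the no_iommu/swiotlb/dmar_disabled bail-out, so each non-ignored DRHD unit already owns a mapped, capability-probed drhd->iommu by the time init_dmars() iterates over them, and the parsed DMAR state stays available to interrupt remapping even when DMA remapping is disabled. A rough sketch of the assumed per-unit work driven from dmar_dev_scope_init(), echoing the removed alloc_iommu():

	/* sketch: per-DRHD setup presumably done during scope init */
	for_each_drhd_unit(drhd) {
		if (drhd->ignored)
			continue;
		/* ioremap registers, read cap/ecap, assign seq_id, ... */
		if (!alloc_iommu(drhd))
			return -ENODEV;
	}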