author     Joerg Roedel <jroedel@suse.de>   2015-07-21 08:45:31 -0400
committer  Joerg Roedel <jroedel@suse.de>   2015-08-12 10:23:33 -0400
commit     28ccce0d954a1cf3baba335bf12581357112fb35
tree       8ac01d470f6e64e1b39b5856d84a6234161b13ae /drivers/iommu/intel-iommu.c
parent     e2411427f7d3ddcf8d5f35d5ab0a397180deac3a
iommu/vt-d: Calculate translation in domain_context_mapping_one
There is no reason to pass the translation type through
multiple layers; it can be determined directly in
domain_context_mapping_one().
Signed-off-by: Joerg Roedel <jroedel@suse.de>
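In short, each caller used to choose between CONTEXT_TT_PASS_THROUGH and
CONTEXT_TT_MULTI_LEVEL and thread that choice down the call chain; after this
patch the callee derives it from the domain type itself. A condensed sketch of
the new logic follows (the code that actually programs the context entry is
elided and the trailing return is a placeholder; see the hunk at
@@ -1907,21 +1913,23 @@ below for the real change):

static int domain_context_mapping_one(struct dmar_domain *domain,
                                      struct intel_iommu *iommu,
                                      u8 bus, u8 devfn)
{
        /* Default to normal multi-level page-table translation ... */
        int translation = CONTEXT_TT_MULTI_LEVEL;

        /*
         * ... and switch to pass-through only for the static identity
         * domain, and only when the hardware supports pass-through.
         */
        if (hw_pass_through && domain_type_is_si(domain))
                translation = CONTEXT_TT_PASS_THROUGH;

        /* ... program the context entry using 'translation', as before ... */
        return 0; /* placeholder; the real function returns the setup result */
}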
Diffstat (limited to 'drivers/iommu/intel-iommu.c')
 -rw-r--r--  drivers/iommu/intel-iommu.c | 49
 1 file changed, 23 insertions(+), 26 deletions(-)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 0a07b44fbc82..ca6ca3ddf349 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -364,7 +364,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
 static struct dmar_domain *si_domain;
 static int hw_pass_through = 1;
 
-/* domain represents a virtual machine, more than one devices
+/*
+ * Domain represents a virtual machine, more than one devices
  * across iommus may be owned in one domain, e.g. kvm guest.
  */
 #define DOMAIN_FLAG_VIRTUAL_MACHINE	(1 << 0)
@@ -640,6 +641,11 @@ static inline int domain_type_is_vm(struct dmar_domain *domain)
 	return domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE;
 }
 
+static inline int domain_type_is_si(struct dmar_domain *domain)
+{
+	return domain->flags & DOMAIN_FLAG_STATIC_IDENTITY;
+}
+
 static inline int domain_type_is_vm_or_si(struct dmar_domain *domain)
 {
 	return domain->flags & (DOMAIN_FLAG_VIRTUAL_MACHINE |
@@ -1907,21 +1913,23 @@ static void domain_exit(struct dmar_domain *domain)
 
 static int domain_context_mapping_one(struct dmar_domain *domain,
 				      struct intel_iommu *iommu,
-				      u8 bus, u8 devfn, int translation)
+				      u8 bus, u8 devfn)
 {
+	int translation = CONTEXT_TT_MULTI_LEVEL;
+	struct device_domain_info *info = NULL;
 	struct context_entry *context;
 	unsigned long flags;
 	struct dma_pte *pgd;
 	int id;
 	int agaw;
-	struct device_domain_info *info = NULL;
+
+	if (hw_pass_through && domain_type_is_si(domain))
+		translation = CONTEXT_TT_PASS_THROUGH;
 
 	pr_debug("Set context mapping for %02x:%02x.%d\n",
 		 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
 
 	BUG_ON(!domain->pgd);
-	BUG_ON(translation != CONTEXT_TT_PASS_THROUGH &&
-	       translation != CONTEXT_TT_MULTI_LEVEL);
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	context = iommu_context_addr(iommu, bus, devfn, 1);
@@ -2013,7 +2021,6 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 struct domain_context_mapping_data {
 	struct dmar_domain *domain;
 	struct intel_iommu *iommu;
-	int translation;
 };
 
 static int domain_context_mapping_cb(struct pci_dev *pdev,
@@ -2022,13 +2029,11 @@ static int domain_context_mapping_cb(struct pci_dev *pdev,
 	struct domain_context_mapping_data *data = opaque;
 
 	return domain_context_mapping_one(data->domain, data->iommu,
-					  PCI_BUS_NUM(alias), alias & 0xff,
-					  data->translation);
+					  PCI_BUS_NUM(alias), alias & 0xff);
 }
 
 static int
-domain_context_mapping(struct dmar_domain *domain, struct device *dev,
-		       int translation)
+domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 {
 	struct intel_iommu *iommu;
 	u8 bus, devfn;
@@ -2039,12 +2044,10 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev,
 		return -ENODEV;
 
 	if (!dev_is_pci(dev))
-		return domain_context_mapping_one(domain, iommu, bus, devfn,
-						  translation);
+		return domain_context_mapping_one(domain, iommu, bus, devfn);
 
 	data.domain = domain;
 	data.iommu = iommu;
-	data.translation = translation;
 
 	return pci_for_each_dma_alias(to_pci_dev(dev),
 				      &domain_context_mapping_cb, &data);
@@ -2511,7 +2514,7 @@ static int iommu_prepare_identity_map(struct device *dev,
 		goto error;
 
 	/* context entry init */
-	ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
+	ret = domain_context_mapping(domain, dev);
 	if (ret)
 		goto error;
 
@@ -2624,8 +2627,7 @@ static int identity_mapping(struct device *dev)
 	return 0;
 }
 
-static int domain_add_dev_info(struct dmar_domain *domain,
-			       struct device *dev, int translation)
+static int domain_add_dev_info(struct dmar_domain *domain, struct device *dev)
 {
 	struct dmar_domain *ndomain;
 	struct intel_iommu *iommu;
@@ -2640,7 +2642,7 @@ static int domain_add_dev_info(struct dmar_domain *domain,
 	if (ndomain != domain)
 		return -EBUSY;
 
-	ret = domain_context_mapping(domain, dev, translation);
+	ret = domain_context_mapping(domain, dev);
 	if (ret) {
 		domain_remove_one_dev_info(domain, dev);
 		return ret;
@@ -2785,9 +2787,7 @@ static int __init dev_prepare_static_identity_mapping(struct device *dev, int hw)
 	if (!iommu_should_identity_map(dev, 1))
 		return 0;
 
-	ret = domain_add_dev_info(si_domain, dev,
-				  hw ? CONTEXT_TT_PASS_THROUGH :
-				       CONTEXT_TT_MULTI_LEVEL);
+	ret = domain_add_dev_info(si_domain, dev);
 	if (!ret)
 		pr_info("%s identity mapping for device %s\n",
 			hw ? "Hardware" : "Software", dev_name(dev));
@@ -3314,7 +3314,7 @@ static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
 
 	/* make sure context mapping is ok */
 	if (unlikely(!domain_context_mapped(dev))) {
-		ret = domain_context_mapping(domain, dev, CONTEXT_TT_MULTI_LEVEL);
+		ret = domain_context_mapping(domain, dev);
 		if (ret) {
 			pr_err("Domain context map for %s failed\n",
 			       dev_name(dev));
@@ -3369,10 +3369,7 @@ static int iommu_no_mapping(struct device *dev)
 	 */
 	if (iommu_should_identity_map(dev, 0)) {
 		int ret;
-		ret = domain_add_dev_info(si_domain, dev,
-					  hw_pass_through ?
-					  CONTEXT_TT_PASS_THROUGH :
-					  CONTEXT_TT_MULTI_LEVEL);
+		ret = domain_add_dev_info(si_domain, dev);
 		if (!ret) {
 			pr_info("64bit %s uses identity mapping\n",
 				dev_name(dev));
@@ -4810,7 +4807,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 		dmar_domain->agaw--;
 	}
 
-	return domain_add_dev_info(dmar_domain, dev, CONTEXT_TT_MULTI_LEVEL);
+	return domain_add_dev_info(dmar_domain, dev);
 }
 
 static void intel_iommu_detach_device(struct iommu_domain *domain,