about summary refs log tree commit diff stats
path: root/drivers/iommu
diff options
context:
space:
mode:
author: Joerg Roedel <jroedel@suse.de> 2015-07-22 10:50:40 -0400
committer: Joerg Roedel <jroedel@suse.de> 2015-08-12 10:23:36 -0400
commit: 55d940430ab91b89ff5fc7240555544d86475783 (patch)
tree: d50c552bcb3f217ef96819fee84300f03f5812f4 /drivers/iommu
parent: de7e888646466e6c32cdd41124c0164cfed4abcb (diff)
iommu/vt-d: Get rid of domain->iommu_lock
When this lock is held, the device_domain_lock is also required to make sure the device_domain_info does not vanish while in use. So this lock can be removed, as it gives no additional protection.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu')
-rw-r--r--drivers/iommu/intel-iommu.c87
1 file changed, 49 insertions, 38 deletions
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c8d9bef776da..0f258f0f5ac0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -406,7 +406,6 @@ struct dmar_domain {
406 int iommu_superpage;/* Level of superpages supported: 406 int iommu_superpage;/* Level of superpages supported:
407 0 == 4KiB (no superpages), 1 == 2MiB, 407 0 == 4KiB (no superpages), 1 == 2MiB,
408 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */ 408 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
409 spinlock_t iommu_lock; /* protect iommu set in domain */
410 u64 max_addr; /* maximum mapped address */ 409 u64 max_addr; /* maximum mapped address */
411 410
412 struct iommu_domain domain; /* generic domain data structure for 411 struct iommu_domain domain; /* generic domain data structure for
@@ -476,6 +475,8 @@ static void dmar_remove_one_dev_info(struct dmar_domain *domain,
476 struct device *dev); 475 struct device *dev);
477static void domain_context_clear(struct intel_iommu *iommu, 476static void domain_context_clear(struct intel_iommu *iommu,
478 struct device *dev); 477 struct device *dev);
478static void __dmar_remove_one_dev_info(struct dmar_domain *domain,
479 struct device *dev);
479static int domain_detach_iommu(struct dmar_domain *domain, 480static int domain_detach_iommu(struct dmar_domain *domain,
480 struct intel_iommu *iommu); 481 struct intel_iommu *iommu);
481 482
@@ -1404,24 +1405,23 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
1404 u8 bus, u8 devfn) 1405 u8 bus, u8 devfn)
1405{ 1406{
1406 bool found = false; 1407 bool found = false;
1407 unsigned long flags;
1408 struct device_domain_info *info; 1408 struct device_domain_info *info;
1409 struct pci_dev *pdev; 1409 struct pci_dev *pdev;
1410 1410
1411 assert_spin_locked(&device_domain_lock);
1412
1411 if (!ecap_dev_iotlb_support(iommu->ecap)) 1413 if (!ecap_dev_iotlb_support(iommu->ecap))
1412 return NULL; 1414 return NULL;
1413 1415
1414 if (!iommu->qi) 1416 if (!iommu->qi)
1415 return NULL; 1417 return NULL;
1416 1418
1417 spin_lock_irqsave(&device_domain_lock, flags);
1418 list_for_each_entry(info, &domain->devices, link) 1419 list_for_each_entry(info, &domain->devices, link)
1419 if (info->iommu == iommu && info->bus == bus && 1420 if (info->iommu == iommu && info->bus == bus &&
1420 info->devfn == devfn) { 1421 info->devfn == devfn) {
1421 found = true; 1422 found = true;
1422 break; 1423 break;
1423 } 1424 }
1424 spin_unlock_irqrestore(&device_domain_lock, flags);
1425 1425
1426 if (!found || !info->dev || !dev_is_pci(info->dev)) 1426 if (!found || !info->dev || !dev_is_pci(info->dev))
1427 return NULL; 1427 return NULL;
@@ -1616,10 +1616,12 @@ static int iommu_init_domains(struct intel_iommu *iommu)
1616static void disable_dmar_iommu(struct intel_iommu *iommu) 1616static void disable_dmar_iommu(struct intel_iommu *iommu)
1617{ 1617{
1618 struct device_domain_info *info, *tmp; 1618 struct device_domain_info *info, *tmp;
1619 unsigned long flags;
1619 1620
1620 if (!iommu->domains || !iommu->domain_ids) 1621 if (!iommu->domains || !iommu->domain_ids)
1621 return; 1622 return;
1622 1623
1624 spin_lock_irqsave(&device_domain_lock, flags);
1623 list_for_each_entry_safe(info, tmp, &device_domain_list, global) { 1625 list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
1624 struct dmar_domain *domain; 1626 struct dmar_domain *domain;
1625 1627
@@ -1636,6 +1638,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
1636 if (!domain_type_is_vm_or_si(domain)) 1638 if (!domain_type_is_vm_or_si(domain))
1637 domain_exit(domain); 1639 domain_exit(domain);
1638 } 1640 }
1641 spin_unlock_irqrestore(&device_domain_lock, flags);
1639 1642
1640 if (iommu->gcmd & DMA_GCMD_TE) 1643 if (iommu->gcmd & DMA_GCMD_TE)
1641 iommu_disable_translation(iommu); 1644 iommu_disable_translation(iommu);
@@ -1672,7 +1675,6 @@ static struct dmar_domain *alloc_domain(int flags)
1672 memset(domain, 0, sizeof(*domain)); 1675 memset(domain, 0, sizeof(*domain));
1673 domain->nid = -1; 1676 domain->nid = -1;
1674 domain->flags = flags; 1677 domain->flags = flags;
1675 spin_lock_init(&domain->iommu_lock);
1676 INIT_LIST_HEAD(&domain->devices); 1678 INIT_LIST_HEAD(&domain->devices);
1677 1679
1678 return domain; 1680 return domain;
@@ -1683,13 +1685,11 @@ static int domain_attach_iommu(struct dmar_domain *domain,
1683 struct intel_iommu *iommu) 1685 struct intel_iommu *iommu)
1684{ 1686{
1685 unsigned long ndomains; 1687 unsigned long ndomains;
1686 unsigned long flags; 1688 int num;
1687 int ret, num;
1688 1689
1690 assert_spin_locked(&device_domain_lock);
1689 assert_spin_locked(&iommu->lock); 1691 assert_spin_locked(&iommu->lock);
1690 1692
1691 spin_lock_irqsave(&domain->iommu_lock, flags);
1692
1693 domain->iommu_refcnt[iommu->seq_id] += 1; 1693 domain->iommu_refcnt[iommu->seq_id] += 1;
1694 domain->iommu_count += 1; 1694 domain->iommu_count += 1;
1695 if (domain->iommu_refcnt[iommu->seq_id] == 1) { 1695 if (domain->iommu_refcnt[iommu->seq_id] == 1) {
@@ -1700,8 +1700,7 @@ static int domain_attach_iommu(struct dmar_domain *domain,
1700 pr_err("%s: No free domain ids\n", iommu->name); 1700 pr_err("%s: No free domain ids\n", iommu->name);
1701 domain->iommu_refcnt[iommu->seq_id] -= 1; 1701 domain->iommu_refcnt[iommu->seq_id] -= 1;
1702 domain->iommu_count -= 1; 1702 domain->iommu_count -= 1;
1703 ret = -ENOSPC; 1703 return -ENOSPC;
1704 goto out_unlock;
1705 } 1704 }
1706 1705
1707 set_bit(num, iommu->domain_ids); 1706 set_bit(num, iommu->domain_ids);
@@ -1713,22 +1712,17 @@ static int domain_attach_iommu(struct dmar_domain *domain,
1713 domain_update_iommu_cap(domain); 1712 domain_update_iommu_cap(domain);
1714 } 1713 }
1715 1714
1716 ret = 0; 1715 return 0;
1717out_unlock:
1718 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1719
1720 return ret;
1721} 1716}
1722 1717
1723static int domain_detach_iommu(struct dmar_domain *domain, 1718static int domain_detach_iommu(struct dmar_domain *domain,
1724 struct intel_iommu *iommu) 1719 struct intel_iommu *iommu)
1725{ 1720{
1726 int num, count = INT_MAX; 1721 int num, count = INT_MAX;
1727 unsigned long flags;
1728 1722
1723 assert_spin_locked(&device_domain_lock);
1729 assert_spin_locked(&iommu->lock); 1724 assert_spin_locked(&iommu->lock);
1730 1725
1731 spin_lock_irqsave(&domain->iommu_lock, flags);
1732 domain->iommu_refcnt[iommu->seq_id] -= 1; 1726 domain->iommu_refcnt[iommu->seq_id] -= 1;
1733 count = --domain->iommu_count; 1727 count = --domain->iommu_count;
1734 if (domain->iommu_refcnt[iommu->seq_id] == 0) { 1728 if (domain->iommu_refcnt[iommu->seq_id] == 0) {
@@ -1739,7 +1733,6 @@ static int domain_detach_iommu(struct dmar_domain *domain,
1739 domain_update_iommu_cap(domain); 1733 domain_update_iommu_cap(domain);
1740 domain->iommu_did[iommu->seq_id] = 0; 1734 domain->iommu_did[iommu->seq_id] = 0;
1741 } 1735 }
1742 spin_unlock_irqrestore(&domain->iommu_lock, flags);
1743 1736
1744 return count; 1737 return count;
1745} 1738}
@@ -1894,7 +1887,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1894 struct context_entry *context; 1887 struct context_entry *context;
1895 unsigned long flags; 1888 unsigned long flags;
1896 struct dma_pte *pgd; 1889 struct dma_pte *pgd;
1897 int agaw; 1890 int ret, agaw;
1898 1891
1899 WARN_ON(did == 0); 1892 WARN_ON(did == 0);
1900 1893
@@ -1906,16 +1899,17 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1906 1899
1907 BUG_ON(!domain->pgd); 1900 BUG_ON(!domain->pgd);
1908 1901
1909 spin_lock_irqsave(&iommu->lock, flags); 1902 spin_lock_irqsave(&device_domain_lock, flags);
1903 spin_lock(&iommu->lock);
1904
1905 ret = -ENOMEM;
1910 context = iommu_context_addr(iommu, bus, devfn, 1); 1906 context = iommu_context_addr(iommu, bus, devfn, 1);
1911 spin_unlock_irqrestore(&iommu->lock, flags);
1912 if (!context) 1907 if (!context)
1913 return -ENOMEM; 1908 goto out_unlock;
1914 spin_lock_irqsave(&iommu->lock, flags); 1909
1915 if (context_present(context)) { 1910 ret = 0;
1916 spin_unlock_irqrestore(&iommu->lock, flags); 1911 if (context_present(context))
1917 return 0; 1912 goto out_unlock;
1918 }
1919 1913
1920 pgd = domain->pgd; 1914 pgd = domain->pgd;
1921 1915
@@ -1928,11 +1922,10 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1928 */ 1922 */
1929 if (translation != CONTEXT_TT_PASS_THROUGH) { 1923 if (translation != CONTEXT_TT_PASS_THROUGH) {
1930 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) { 1924 for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
1925 ret = -ENOMEM;
1931 pgd = phys_to_virt(dma_pte_addr(pgd)); 1926 pgd = phys_to_virt(dma_pte_addr(pgd));
1932 if (!dma_pte_present(pgd)) { 1927 if (!dma_pte_present(pgd))
1933 spin_unlock_irqrestore(&iommu->lock, flags); 1928 goto out_unlock;
1934 return -ENOMEM;
1935 }
1936 } 1929 }
1937 1930
1938 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn); 1931 info = iommu_support_dev_iotlb(domain, iommu, bus, devfn);
@@ -1971,7 +1964,12 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
1971 iommu_flush_write_buffer(iommu); 1964 iommu_flush_write_buffer(iommu);
1972 } 1965 }
1973 iommu_enable_dev_iotlb(info); 1966 iommu_enable_dev_iotlb(info);
1974 spin_unlock_irqrestore(&iommu->lock, flags); 1967
1968 ret = 0;
1969
1970out_unlock:
1971 spin_unlock(&iommu->lock);
1972 spin_unlock_irqrestore(&device_domain_lock, flags);
1975 1973
1976 return 0; 1974 return 0;
1977} 1975}
@@ -2214,9 +2212,12 @@ static inline void unlink_domain_info(struct device_domain_info *info)
2214static void domain_remove_dev_info(struct dmar_domain *domain) 2212static void domain_remove_dev_info(struct dmar_domain *domain)
2215{ 2213{
2216 struct device_domain_info *info, *tmp; 2214 struct device_domain_info *info, *tmp;
2215 unsigned long flags;
2217 2216
2217 spin_lock_irqsave(&device_domain_lock, flags);
2218 list_for_each_entry_safe(info, tmp, &domain->devices, link) 2218 list_for_each_entry_safe(info, tmp, &domain->devices, link)
2219 dmar_remove_one_dev_info(domain, info->dev); 2219 __dmar_remove_one_dev_info(domain, info->dev);
2220 spin_unlock_irqrestore(&device_domain_lock, flags);
2220} 2221}
2221 2222
2222/* 2223/*
@@ -4539,14 +4540,16 @@ static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
4539 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu); 4540 pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
4540} 4541}
4541 4542
4542static void dmar_remove_one_dev_info(struct dmar_domain *domain, 4543static void __dmar_remove_one_dev_info(struct dmar_domain *domain,
4543 struct device *dev) 4544 struct device *dev)
4544{ 4545{
4545 struct device_domain_info *info; 4546 struct device_domain_info *info;
4546 struct intel_iommu *iommu; 4547 struct intel_iommu *iommu;
4547 unsigned long flags; 4548 unsigned long flags;
4548 u8 bus, devfn; 4549 u8 bus, devfn;
4549 4550
4551 assert_spin_locked(&device_domain_lock);
4552
4550 iommu = device_to_iommu(dev, &bus, &devfn); 4553 iommu = device_to_iommu(dev, &bus, &devfn);
4551 if (!iommu) 4554 if (!iommu)
4552 return; 4555 return;
@@ -4556,9 +4559,7 @@ static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4556 if (WARN_ON(!info)) 4559 if (WARN_ON(!info))
4557 return; 4560 return;
4558 4561
4559 spin_lock_irqsave(&device_domain_lock, flags);
4560 unlink_domain_info(info); 4562 unlink_domain_info(info);
4561 spin_unlock_irqrestore(&device_domain_lock, flags);
4562 4563
4563 iommu_disable_dev_iotlb(info); 4564 iommu_disable_dev_iotlb(info);
4564 domain_context_clear(iommu, dev); 4565 domain_context_clear(iommu, dev);
@@ -4569,6 +4570,16 @@ static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4569 spin_unlock_irqrestore(&iommu->lock, flags); 4570 spin_unlock_irqrestore(&iommu->lock, flags);
4570} 4571}
4571 4572
4573static void dmar_remove_one_dev_info(struct dmar_domain *domain,
4574 struct device *dev)
4575{
4576 unsigned long flags;
4577
4578 spin_lock_irqsave(&device_domain_lock, flags);
4579 __dmar_remove_one_dev_info(domain, dev);
4580 spin_unlock_irqrestore(&device_domain_lock, flags);
4581}
4582
4572static int md_domain_init(struct dmar_domain *domain, int guest_width) 4583static int md_domain_init(struct dmar_domain *domain, int guest_width)
4573{ 4584{
4574 int adjust_width; 4585 int adjust_width;