author    Joerg Roedel <joerg.roedel@amd.com>  2009-11-23 10:52:56 -0500
committer Joerg Roedel <joerg.roedel@amd.com>  2009-11-27 08:16:30 -0500
commit    f99c0f1c75f75924a6f19cb40a21ccefc6e8754d
tree      962665b865b4bced5ea49372a8403c40fa1677f6
parent    420aef8a3acfc3e75427107e23d5a9bafd17c477
x86/amd-iommu: Use check_device in get_device_resources
Every call site of get_device_resources() calls check_device() before it. So call check_device() from get_device_resources() directly and simplify the code.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
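For illustration, this is the calling pattern the patch removes versus the one it introduces, condensed from the map_page() hunk below (error handling abbreviated):

	/* before: every caller checks the device, then inspects the outputs */
	if (!check_device(dev))
		return DMA_ERROR_CODE;
	get_device_resources(dev, &iommu, &domain, &devid);
	if (iommu == NULL || domain == NULL)
		/* device not handled by any AMD IOMMU */
		return (dma_addr_t)paddr;

	/* after: a single boolean check, since get_device_resources()
	 * now calls check_device() itself and returns false on failure */
	if (!get_device_resources(dev, &iommu, &domain, &devid))
		/* device not handled by any AMD IOMMU */
		return (dma_addr_t)paddr;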
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 86
1 file changed, 28 insertions(+), 58 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index ac27b1d6bd12..c5102ebdcbd9 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1432,35 +1432,24 @@ static struct dma_ops_domain *find_protection_domain(u16 devid)
  * If the device is not yet associated with a domain this is also done
  * in this function.
  */
-static int get_device_resources(struct device *dev,
-				struct amd_iommu **iommu,
-				struct protection_domain **domain,
-				u16 *bdf)
+static bool get_device_resources(struct device *dev,
+				 struct amd_iommu **iommu,
+				 struct protection_domain **domain,
+				 u16 *bdf)
 {
 	struct dma_ops_domain *dma_dom;
 	struct pci_dev *pcidev;
 	u16 _bdf;
 
-	*iommu = NULL;
-	*domain = NULL;
-	*bdf = 0xffff;
-
-	if (dev->bus != &pci_bus_type)
-		return 0;
-
-	pcidev = to_pci_dev(dev);
-	_bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
-
-	/* device not translated by any IOMMU in the system? */
-	if (_bdf > amd_iommu_last_bdf)
-		return 0;
-
-	*bdf = amd_iommu_alias_table[_bdf];
+	if (!check_device(dev))
+		return false;
 
-	*iommu = amd_iommu_rlookup_table[*bdf];
-	if (*iommu == NULL)
-		return 0;
+	pcidev = to_pci_dev(dev);
+	_bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
+	*bdf = amd_iommu_alias_table[_bdf];
+	*iommu = amd_iommu_rlookup_table[*bdf];
 	*domain = domain_for_device(*bdf);
+
 	if (*domain == NULL) {
 		dma_dom = find_protection_domain(*bdf);
 		if (!dma_dom)
@@ -1474,7 +1463,7 @@ static int get_device_resources(struct device *dev,
 	if (domain_for_device(_bdf) == NULL)
 		attach_device(*iommu, *domain, _bdf);
 
-	return 1;
+	return true;
 }
 
 static void update_device_table(struct protection_domain *domain)
@@ -1797,17 +1786,12 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 
 	INC_STATS_COUNTER(cnt_map_single);
 
-	if (!check_device(dev))
-		return DMA_ERROR_CODE;
-
-	dma_mask = *dev->dma_mask;
-
-	get_device_resources(dev, &iommu, &domain, &devid);
-
-	if (iommu == NULL || domain == NULL)
+	if (!get_device_resources(dev, &iommu, &domain, &devid))
 		/* device not handled by any AMD IOMMU */
 		return (dma_addr_t)paddr;
 
+	dma_mask = *dev->dma_mask;
+
 	if (!dma_ops_domain(domain))
 		return DMA_ERROR_CODE;
 
@@ -1838,8 +1822,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 
 	INC_STATS_COUNTER(cnt_unmap_single);
 
-	if (!check_device(dev) ||
-	    !get_device_resources(dev, &iommu, &domain, &devid))
+	if (!get_device_resources(dev, &iommu, &domain, &devid))
 		/* device not handled by any AMD IOMMU */
 		return;
 
@@ -1893,16 +1876,11 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 
 	INC_STATS_COUNTER(cnt_map_sg);
 
-	if (!check_device(dev))
-		return 0;
+	if (!get_device_resources(dev, &iommu, &domain, &devid))
+		return map_sg_no_iommu(dev, sglist, nelems, dir);
 
 	dma_mask = *dev->dma_mask;
 
-	get_device_resources(dev, &iommu, &domain, &devid);
-
-	if (!iommu || !domain)
-		return map_sg_no_iommu(dev, sglist, nelems, dir);
-
 	if (!dma_ops_domain(domain))
 		return 0;
 
@@ -1958,8 +1936,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 
 	INC_STATS_COUNTER(cnt_unmap_sg);
 
-	if (!check_device(dev) ||
-	    !get_device_resources(dev, &iommu, &domain, &devid))
+	if (!get_device_resources(dev, &iommu, &domain, &devid))
 		return;
 
 	if (!dma_ops_domain(domain))
@@ -1994,24 +1971,22 @@ static void *alloc_coherent(struct device *dev, size_t size,
 
 	INC_STATS_COUNTER(cnt_alloc_coherent);
 
-	if (!check_device(dev))
-		return NULL;
+	if (!get_device_resources(dev, &iommu, &domain, &devid)) {
+		virt_addr = (void *)__get_free_pages(flag, get_order(size));
+		*dma_addr = __pa(virt_addr);
+		return virt_addr;
+	}
 
-	if (!get_device_resources(dev, &iommu, &domain, &devid))
-		flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+	dma_mask = dev->coherent_dma_mask;
+	flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+	flag |= __GFP_ZERO;
 
-	flag |= __GFP_ZERO;
 	virt_addr = (void *)__get_free_pages(flag, get_order(size));
 	if (!virt_addr)
 		return NULL;
 
 	paddr = virt_to_phys(virt_addr);
 
-	if (!iommu || !domain) {
-		*dma_addr = (dma_addr_t)paddr;
-		return virt_addr;
-	}
-
 	if (!dma_ops_domain(domain))
 		goto out_free;
 
@@ -2054,12 +2029,7 @@ static void free_coherent(struct device *dev, size_t size,
 
 	INC_STATS_COUNTER(cnt_free_coherent);
 
-	if (!check_device(dev))
-		return;
-
-	get_device_resources(dev, &iommu, &domain, &devid);
-
-	if (!iommu || !domain)
+	if (!get_device_resources(dev, &iommu, &domain, &devid))
 		goto free_mem;
 
 	if (!dma_ops_domain(domain))