author     Joerg Roedel <joerg.roedel@amd.com>  2009-11-24 10:40:02 -0500
committer  Joerg Roedel <joerg.roedel@amd.com>  2009-11-27 08:20:21 -0500
commit     94f6d190eeed91cb2bb901aa7816edd1e2405347 (patch)
tree       f117e8fe19592483f10ab81c8ccbb7343a0de0a2 /arch/x86/kernel/amd_iommu.c
parent     15898bbcb48fc86c2baff156163df0941ecb6a15 (diff)
x86/amd-iommu: Simplify get_device_resources()
With the previous changes the get_device_resources() function can be simplified even further. The only information the callers actually need is the protection domain. This patch renames the function to get_domain() and has it return only the protection domain for a device.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
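For illustration, here is a minimal sketch of the calling convention this introduces, paraphrased from the map_page() hunk below rather than taken verbatim from the patch. Callers use the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers from <linux/err.h> to distinguish "device not handled by any AMD IOMMU" (-EINVAL) from any other failure such as -EBUSY:

	struct protection_domain *domain;

	domain = get_domain(dev);
	if (PTR_ERR(domain) == -EINVAL)
		/* device not handled by any AMD IOMMU - keep the 1:1 mapping */
		return (dma_addr_t)paddr;
	else if (IS_ERR(domain))
		/* any other error (e.g. -EBUSY) is a hard failure */
		return DMA_ERROR_CODE;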
Diffstat (limited to 'arch/x86/kernel/amd_iommu.c')
-rw-r--r--  arch/x86/kernel/amd_iommu.c | 86
1 file changed, 36 insertions(+), 50 deletions(-)
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index 75470ffee358..e5bbe9a0c192 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1463,6 +1463,7 @@ static struct dma_ops_domain *find_protection_domain(u16 devid)
 {
 	struct dma_ops_domain *entry, *ret = NULL;
 	unsigned long flags;
+	u16 alias = amd_iommu_alias_table[devid];
 
 	if (list_empty(&iommu_pd_list))
 		return NULL;
@@ -1470,7 +1471,8 @@ static struct dma_ops_domain *find_protection_domain(u16 devid)
 	spin_lock_irqsave(&iommu_pd_list_lock, flags);
 
 	list_for_each_entry(entry, &iommu_pd_list, list) {
-		if (entry->target_dev == devid) {
+		if (entry->target_dev == devid ||
+		    entry->target_dev == alias) {
 			ret = entry;
 			break;
 		}
@@ -1488,33 +1490,31 @@ static struct dma_ops_domain *find_protection_domain(u16 devid)
  * If the device is not yet associated with a domain this is also done
  * in this function.
  */
-static bool get_device_resources(struct device *dev,
-				 struct protection_domain **domain,
-				 u16 *bdf)
+static struct protection_domain *get_domain(struct device *dev)
 {
+	struct protection_domain *domain;
 	struct dma_ops_domain *dma_dom;
-	struct amd_iommu *iommu;
+	u16 devid = get_device_id(dev);
 
 	if (!check_device(dev))
-		return false;
+		return ERR_PTR(-EINVAL);
 
-	*bdf = get_device_id(dev);
-	*domain = domain_for_device(dev);
-	iommu = amd_iommu_rlookup_table[*bdf];
+	domain = domain_for_device(dev);
+	if (domain != NULL && !dma_ops_domain(domain))
+		return ERR_PTR(-EBUSY);
 
-	if (*domain != NULL)
-		return true;
+	if (domain != NULL)
+		return domain;
 
 	/* Device not bount yet - bind it */
-	dma_dom = find_protection_domain(*bdf);
+	dma_dom = find_protection_domain(devid);
 	if (!dma_dom)
-		dma_dom = iommu->default_dom;
-	*domain = &dma_dom->domain;
-	attach_device(dev, *domain);
+		dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
+	attach_device(dev, &dma_dom->domain);
 	DUMP_printk("Using protection domain %d for device %s\n",
-		    (*domain)->id, dev_name(dev));
+		    dma_dom->domain.id, dev_name(dev));
 
-	return true;
+	return &dma_dom->domain;
 }
 
 static void update_device_table(struct protection_domain *domain)
@@ -1825,23 +1825,22 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 {
 	unsigned long flags;
 	struct protection_domain *domain;
-	u16 devid;
 	dma_addr_t addr;
 	u64 dma_mask;
 	phys_addr_t paddr = page_to_phys(page) + offset;
 
 	INC_STATS_COUNTER(cnt_map_single);
 
-	if (!get_device_resources(dev, &domain, &devid))
-		/* device not handled by any AMD IOMMU */
+	domain = get_domain(dev);
+	if (PTR_ERR(domain) == -EINVAL)
 		return (dma_addr_t)paddr;
+	else if (IS_ERR(domain))
+		return DMA_ERROR_CODE;
 
 	dma_mask = *dev->dma_mask;
 
-	if (!dma_ops_domain(domain))
-		return DMA_ERROR_CODE;
-
 	spin_lock_irqsave(&domain->lock, flags);
+
 	addr = __map_single(dev, domain->priv, paddr, size, dir, false,
 			    dma_mask);
 	if (addr == DMA_ERROR_CODE)
@@ -1863,15 +1862,11 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 {
 	unsigned long flags;
 	struct protection_domain *domain;
-	u16 devid;
 
 	INC_STATS_COUNTER(cnt_unmap_single);
 
-	if (!get_device_resources(dev, &domain, &devid))
-		/* device not handled by any AMD IOMMU */
-		return;
-
-	if (!dma_ops_domain(domain))
+	domain = get_domain(dev);
+	if (IS_ERR(domain))
 		return;
 
 	spin_lock_irqsave(&domain->lock, flags);
@@ -1911,7 +1906,6 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 {
 	unsigned long flags;
 	struct protection_domain *domain;
-	u16 devid;
 	int i;
 	struct scatterlist *s;
 	phys_addr_t paddr;
@@ -1920,14 +1914,14 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 
 	INC_STATS_COUNTER(cnt_map_sg);
 
-	if (!get_device_resources(dev, &domain, &devid))
+	domain = get_domain(dev);
+	if (PTR_ERR(domain) == -EINVAL)
 		return map_sg_no_iommu(dev, sglist, nelems, dir);
+	else if (IS_ERR(domain))
+		return 0;
 
 	dma_mask = *dev->dma_mask;
 
-	if (!dma_ops_domain(domain))
-		return 0;
-
 	spin_lock_irqsave(&domain->lock, flags);
 
 	for_each_sg(sglist, s, nelems, i) {
@@ -1974,15 +1968,12 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 	unsigned long flags;
 	struct protection_domain *domain;
 	struct scatterlist *s;
-	u16 devid;
 	int i;
 
 	INC_STATS_COUNTER(cnt_unmap_sg);
 
-	if (!get_device_resources(dev, &domain, &devid))
-		return;
-
-	if (!dma_ops_domain(domain))
+	domain = get_domain(dev);
+	if (IS_ERR(domain))
 		return;
 
 	spin_lock_irqsave(&domain->lock, flags);
@@ -2007,17 +1998,18 @@ static void *alloc_coherent(struct device *dev, size_t size,
 	unsigned long flags;
 	void *virt_addr;
 	struct protection_domain *domain;
-	u16 devid;
 	phys_addr_t paddr;
 	u64 dma_mask = dev->coherent_dma_mask;
 
 	INC_STATS_COUNTER(cnt_alloc_coherent);
 
-	if (!get_device_resources(dev, &domain, &devid)) {
+	domain = get_domain(dev);
+	if (PTR_ERR(domain) == -EINVAL) {
 		virt_addr = (void *)__get_free_pages(flag, get_order(size));
 		*dma_addr = __pa(virt_addr);
 		return virt_addr;
-	}
+	} else if (IS_ERR(domain))
+		return NULL;
 
 	dma_mask = dev->coherent_dma_mask;
 	flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
@@ -2029,9 +2021,6 @@ static void *alloc_coherent(struct device *dev, size_t size,
 
 	paddr = virt_to_phys(virt_addr);
 
-	if (!dma_ops_domain(domain))
-		goto out_free;
-
 	if (!dma_mask)
 		dma_mask = *dev->dma_mask;
 
@@ -2066,14 +2055,11 @@ static void free_coherent(struct device *dev, size_t size,
 {
 	unsigned long flags;
 	struct protection_domain *domain;
-	u16 devid;
 
 	INC_STATS_COUNTER(cnt_free_coherent);
 
-	if (!get_device_resources(dev, &domain, &devid))
-		goto free_mem;
-
-	if (!dma_ops_domain(domain))
+	domain = get_domain(dev);
+	if (IS_ERR(domain))
 		goto free_mem;
 
 	spin_lock_irqsave(&domain->lock, flags);