Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/kernel/amd_iommu.c | 28
1 file changed, 11 insertions, 17 deletions
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index c04dcb7f40b2..2cd5800e6888 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -1436,11 +1436,11 @@ static struct dma_ops_domain *find_protection_domain(u16 devid)
  * in this function.
  */
 static bool get_device_resources(struct device *dev,
-				 struct amd_iommu **iommu,
 				 struct protection_domain **domain,
 				 u16 *bdf)
 {
 	struct dma_ops_domain *dma_dom;
+	struct amd_iommu *iommu;
 	struct pci_dev *pcidev;
 	u16 _bdf;
 
@@ -1450,21 +1450,21 @@ static bool get_device_resources(struct device *dev,
 	pcidev = to_pci_dev(dev);
 	_bdf = calc_devid(pcidev->bus->number, pcidev->devfn);
 	*bdf = amd_iommu_alias_table[_bdf];
-	*iommu = amd_iommu_rlookup_table[*bdf];
+	iommu = amd_iommu_rlookup_table[*bdf];
 	*domain = domain_for_device(*bdf);
 
 	if (*domain == NULL) {
 		dma_dom = find_protection_domain(*bdf);
 		if (!dma_dom)
-			dma_dom = (*iommu)->default_dom;
+			dma_dom = iommu->default_dom;
 		*domain = &dma_dom->domain;
-		attach_device(*iommu, *domain, *bdf);
+		attach_device(iommu, *domain, *bdf);
 		DUMP_printk("Using protection domain %d for device %s\n",
 			    (*domain)->id, dev_name(dev));
 	}
 
 	if (domain_for_device(_bdf) == NULL)
-		attach_device(*iommu, *domain, _bdf);
+		attach_device(iommu, *domain, _bdf);
 
 	return true;
 }
@@ -1776,7 +1776,6 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 			   struct dma_attrs *attrs)
 {
 	unsigned long flags;
-	struct amd_iommu *iommu;
 	struct protection_domain *domain;
 	u16 devid;
 	dma_addr_t addr;
@@ -1785,7 +1784,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
 
 	INC_STATS_COUNTER(cnt_map_single);
 
-	if (!get_device_resources(dev, &iommu, &domain, &devid))
+	if (!get_device_resources(dev, &domain, &devid))
 		/* device not handled by any AMD IOMMU */
 		return (dma_addr_t)paddr;
 
@@ -1815,13 +1814,12 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		       enum dma_data_direction dir, struct dma_attrs *attrs)
 {
 	unsigned long flags;
-	struct amd_iommu *iommu;
 	struct protection_domain *domain;
 	u16 devid;
 
 	INC_STATS_COUNTER(cnt_unmap_single);
 
-	if (!get_device_resources(dev, &iommu, &domain, &devid))
+	if (!get_device_resources(dev, &domain, &devid))
 		/* device not handled by any AMD IOMMU */
 		return;
 
@@ -1864,7 +1862,6 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 		  struct dma_attrs *attrs)
 {
 	unsigned long flags;
-	struct amd_iommu *iommu;
 	struct protection_domain *domain;
 	u16 devid;
 	int i;
@@ -1875,7 +1872,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
 
 	INC_STATS_COUNTER(cnt_map_sg);
 
-	if (!get_device_resources(dev, &iommu, &domain, &devid))
+	if (!get_device_resources(dev, &domain, &devid))
 		return map_sg_no_iommu(dev, sglist, nelems, dir);
 
 	dma_mask = *dev->dma_mask;
@@ -1927,7 +1924,6 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 		     struct dma_attrs *attrs)
 {
 	unsigned long flags;
-	struct amd_iommu *iommu;
 	struct protection_domain *domain;
 	struct scatterlist *s;
 	u16 devid;
@@ -1935,7 +1931,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 
 	INC_STATS_COUNTER(cnt_unmap_sg);
 
-	if (!get_device_resources(dev, &iommu, &domain, &devid))
+	if (!get_device_resources(dev, &domain, &devid))
 		return;
 
 	if (!dma_ops_domain(domain))
@@ -1962,7 +1958,6 @@ static void *alloc_coherent(struct device *dev, size_t size,
 {
 	unsigned long flags;
 	void *virt_addr;
-	struct amd_iommu *iommu;
 	struct protection_domain *domain;
 	u16 devid;
 	phys_addr_t paddr;
@@ -1970,7 +1965,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
 
 	INC_STATS_COUNTER(cnt_alloc_coherent);
 
-	if (!get_device_resources(dev, &iommu, &domain, &devid)) {
+	if (!get_device_resources(dev, &domain, &devid)) {
 		virt_addr = (void *)__get_free_pages(flag, get_order(size));
 		*dma_addr = __pa(virt_addr);
 		return virt_addr;
@@ -2022,13 +2017,12 @@ static void free_coherent(struct device *dev, size_t size,
 			  void *virt_addr, dma_addr_t dma_addr)
 {
 	unsigned long flags;
-	struct amd_iommu *iommu;
 	struct protection_domain *domain;
 	u16 devid;
 
 	INC_STATS_COUNTER(cnt_free_coherent);
 
-	if (!get_device_resources(dev, &iommu, &domain, &devid))
+	if (!get_device_resources(dev, &domain, &devid))
 		goto free_mem;
 
 	if (!dma_ops_domain(domain))
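
The pattern behind every hunk above is the same refactoring: get_device_resources() used to hand the per-device IOMMU back to its callers through a struct amd_iommu ** out-parameter, but the callers only ever passed that pointer straight back into attach_device(), so the lookup can live inside the function and the callers lose one local and one argument. The following is a minimal, self-contained userspace sketch of that before/after shape; lookup_iommu(), lookup_domain(), and the struct names are illustrative stand-ins for the kernel's rlookup/alias tables and domain_for_device(), not the real AMD IOMMU API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel structures; not the real API. */
struct iommu  { int id; };
struct domain { int id; };

static struct iommu  example_iommu  = { .id = 1 };
static struct domain example_domain = { .id = 7 };

/* Hypothetical lookup helpers standing in for the rlookup/alias tables. */
static struct iommu *lookup_iommu(unsigned short devid)
{
	return &example_iommu;
}

static struct domain *lookup_domain(unsigned short devid)
{
	return &example_domain;
}

static void attach_device(struct iommu *iommu, struct domain *dom,
			  unsigned short devid)
{
	printf("attach dev %u to domain %d via iommu %d\n",
	       devid, dom->id, iommu->id);
}

/*
 * After the refactoring: the iommu pointer is looked up and consumed
 * locally, so the out-parameter disappears and callers only receive
 * the domain and device id they actually use.
 */
static bool get_device_resources(unsigned short raw_devid,
				 struct domain **domain,
				 unsigned short *devid)
{
	struct iommu *iommu;	/* now a local, no longer an out-param */

	*devid  = raw_devid;	/* alias translation elided in this sketch */
	iommu   = lookup_iommu(*devid);
	*domain = lookup_domain(*devid);

	if (*domain == NULL)
		return false;

	attach_device(iommu, *domain, *devid);
	return true;
}

int main(void)
{
	struct domain *domain;
	unsigned short devid;

	/* Callers no longer declare or pass a struct iommu pointer. */
	if (!get_device_resources(0x08, &domain, &devid))
		return 1;

	printf("dev %u uses domain %d\n", devid, domain->id);
	return 0;
}

In the sketch, as in the patch, shrinking the parameter list is safe because no caller inspected the iommu pointer beyond forwarding it; only get_device_resources() itself needs it.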