Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu.c        291
-rw-r--r--  drivers/iommu/amd_iommu_types.h    9
2 files changed, 175 insertions(+), 125 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 5aa12eaabd21..748eab063857 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -46,6 +46,10 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 static LIST_HEAD(iommu_pd_list);
 static DEFINE_SPINLOCK(iommu_pd_list_lock);

+/* List of all available dev_data structures */
+static LIST_HEAD(dev_data_list);
+static DEFINE_SPINLOCK(dev_data_list_lock);
+
 /*
  * Domain for untranslated devices - only allocated
  * if iommu=pt passed on kernel cmd line.
@@ -69,6 +73,67 @@ static void update_domain(struct protection_domain *domain);
  *
  ****************************************************************************/

+static struct iommu_dev_data *alloc_dev_data(u16 devid)
+{
+	struct iommu_dev_data *dev_data;
+	unsigned long flags;
+
+	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+	if (!dev_data)
+		return NULL;
+
+	dev_data->devid = devid;
+	atomic_set(&dev_data->bind, 0);
+
+	spin_lock_irqsave(&dev_data_list_lock, flags);
+	list_add_tail(&dev_data->dev_data_list, &dev_data_list);
+	spin_unlock_irqrestore(&dev_data_list_lock, flags);
+
+	return dev_data;
+}
+
+static void free_dev_data(struct iommu_dev_data *dev_data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_data_list_lock, flags);
+	list_del(&dev_data->dev_data_list);
+	spin_unlock_irqrestore(&dev_data_list_lock, flags);
+
+	kfree(dev_data);
+}
+
+static struct iommu_dev_data *search_dev_data(u16 devid)
+{
+	struct iommu_dev_data *dev_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev_data_list_lock, flags);
+	list_for_each_entry(dev_data, &dev_data_list, dev_data_list) {
+		if (dev_data->devid == devid)
+			goto out_unlock;
+	}
+
+	dev_data = NULL;
+
+out_unlock:
+	spin_unlock_irqrestore(&dev_data_list_lock, flags);
+
+	return dev_data;
+}
+
+static struct iommu_dev_data *find_dev_data(u16 devid)
+{
+	struct iommu_dev_data *dev_data;
+
+	dev_data = search_dev_data(devid);
+
+	if (dev_data == NULL)
+		dev_data = alloc_dev_data(devid);
+
+	return dev_data;
+}
+
 static inline u16 get_device_id(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
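Note: the three helpers added above implement a lookup-or-allocate pattern over a single spinlock-protected global list: search_dev_data() does a linear scan under the lock, and find_dev_data() falls back to alloc_dev_data() on a miss. A minimal userspace sketch of the same idiom, with a pthread mutex standing in for the spinlock and a hand-rolled singly-linked list standing in for <linux/list.h> (names here are illustrative, not kernel API):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct dev_data {
	uint16_t devid;
	struct dev_data *next;	/* plays the role of the dev_data_list node */
};

static struct dev_data *dev_data_head;
static pthread_mutex_t dev_data_lock = PTHREAD_MUTEX_INITIALIZER;

static struct dev_data *search_dev_data(uint16_t devid)
{
	struct dev_data *d;

	pthread_mutex_lock(&dev_data_lock);
	for (d = dev_data_head; d; d = d->next)
		if (d->devid == devid)
			break;
	pthread_mutex_unlock(&dev_data_lock);

	return d;
}

static struct dev_data *find_dev_data(uint16_t devid)
{
	struct dev_data *d = search_dev_data(devid);

	if (d)
		return d;

	/* Miss: allocate and publish, like alloc_dev_data(). As in the
	 * driver, concurrent first-time lookups of the same devid are
	 * assumed to be serialized by the caller. */
	d = calloc(1, sizeof(*d));
	if (!d)
		return NULL;
	d->devid = devid;

	pthread_mutex_lock(&dev_data_lock);
	d->next = dev_data_head;
	dev_data_head = d;
	pthread_mutex_unlock(&dev_data_lock);

	return d;
}

int main(void)
{
	/* The second lookup returns the entry created by the first. */
	printf("%p\n", (void *)find_dev_data(0x1234));
	printf("%p\n", (void *)find_dev_data(0x1234));
	return 0;
}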
@@ -139,33 +204,31 @@ static bool check_device(struct device *dev)
 static int iommu_init_device(struct device *dev)
 {
 	struct iommu_dev_data *dev_data;
-	struct pci_dev *pdev;
-	u16 devid, alias;
+	u16 alias;

 	if (dev->archdata.iommu)
 		return 0;

-	dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+	dev_data = find_dev_data(get_device_id(dev));
 	if (!dev_data)
 		return -ENOMEM;

-	dev_data->dev = dev;
+	alias = amd_iommu_alias_table[dev_data->devid];
+	if (alias != dev_data->devid) {
+		struct iommu_dev_data *alias_data;

-	devid = get_device_id(dev);
-	alias = amd_iommu_alias_table[devid];
-	pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
-	if (pdev)
-		dev_data->alias = &pdev->dev;
-	else {
-		kfree(dev_data);
-		return -ENOTSUPP;
-	}
+		alias_data = find_dev_data(alias);
+		if (alias_data == NULL) {
+			pr_err("AMD-Vi: Warning: Unhandled device %s\n",
+			       dev_name(dev));
+			free_dev_data(dev_data);
+			return -ENOTSUPP;
+		}
+		dev_data->alias_data = alias_data;
+	}

-	atomic_set(&dev_data->bind, 0);
-
 	dev->archdata.iommu = dev_data;

-
 	return 0;
 }

@@ -185,11 +248,16 @@ static void iommu_ignore_device(struct device *dev)

 static void iommu_uninit_device(struct device *dev)
 {
-	kfree(dev->archdata.iommu);
+	/*
+	 * Nothing to do here - we keep dev_data around for unplugged devices
+	 * and reuse it when the device is re-plugged - not doing so would
+	 * introduce a ton of races.
+	 */
 }

 void __init amd_iommu_uninit_devices(void)
 {
+	struct iommu_dev_data *dev_data, *n;
 	struct pci_dev *pdev = NULL;

 	for_each_pci_dev(pdev) {
@@ -199,6 +267,10 @@ void __init amd_iommu_uninit_devices(void)

 		iommu_uninit_device(&pdev->dev);
 	}
+
+	/* Free all of our dev_data structures */
+	list_for_each_entry_safe(dev_data, n, &dev_data_list, dev_data_list)
+		free_dev_data(dev_data);
 }

 int __init amd_iommu_init_devices(void)
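Note the _safe iterator: free_dev_data() unlinks and frees the entry the cursor stands on, so the loop must capture the successor first, which is what the extra 'n' cursor is for. The same rule in miniature, as plain C (illustrative, not kernel API):

#include <stdlib.h>

struct node {
	struct node *next;
};

/* Free a whole list safely: read ->next before freeing the current
 * node, exactly what list_for_each_entry_safe()'s second cursor does. */
static void free_all(struct node **head)
{
	struct node *cur = *head, *next;

	while (cur) {
		next = cur->next;
		free(cur);
		cur = next;
	}
	*head = NULL;
}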
@@ -655,19 +727,17 @@ void iommu_flush_all_caches(struct amd_iommu *iommu)
 /*
  * Command send function for flushing on-device TLB
  */
-static int device_flush_iotlb(struct device *dev, u64 address, size_t size)
+static int device_flush_iotlb(struct iommu_dev_data *dev_data,
+			      u64 address, size_t size)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
 	struct amd_iommu *iommu;
 	struct iommu_cmd cmd;
-	u16 devid;
 	int qdep;

-	qdep = pci_ats_queue_depth(pdev);
-	devid = get_device_id(dev);
-	iommu = amd_iommu_rlookup_table[devid];
+	qdep  = dev_data->ats.qdep;
+	iommu = amd_iommu_rlookup_table[dev_data->devid];

-	build_inv_iotlb_pages(&cmd, devid, qdep, address, size);
+	build_inv_iotlb_pages(&cmd, dev_data->devid, qdep, address, size);

 	return iommu_queue_command(iommu, &cmd);
 }
@@ -675,23 +745,19 @@ static int device_flush_iotlb(struct device *dev, u64 address, size_t size)
 /*
  * Command send function for invalidating a device table entry
  */
-static int device_flush_dte(struct device *dev)
+static int device_flush_dte(struct iommu_dev_data *dev_data)
 {
 	struct amd_iommu *iommu;
-	struct pci_dev *pdev;
-	u16 devid;
 	int ret;

-	pdev = to_pci_dev(dev);
-	devid = get_device_id(dev);
-	iommu = amd_iommu_rlookup_table[devid];
+	iommu = amd_iommu_rlookup_table[dev_data->devid];

-	ret = iommu_flush_dte(iommu, devid);
+	ret = iommu_flush_dte(iommu, dev_data->devid);
 	if (ret)
 		return ret;

-	if (pci_ats_enabled(pdev))
-		ret = device_flush_iotlb(dev, 0, ~0UL);
+	if (dev_data->ats.enabled)
+		ret = device_flush_iotlb(dev_data, 0, ~0UL);

 	return ret;
 }
@@ -722,12 +788,11 @@ static void __domain_flush_pages(struct protection_domain *domain,
 	}

 	list_for_each_entry(dev_data, &domain->dev_list, list) {
-		struct pci_dev *pdev = to_pci_dev(dev_data->dev);

-		if (!pci_ats_enabled(pdev))
+		if (!dev_data->ats.enabled)
 			continue;

-		ret |= device_flush_iotlb(dev_data->dev, address, size);
+		ret |= device_flush_iotlb(dev_data, address, size);
 	}

 	WARN_ON(ret);
@@ -779,7 +844,7 @@ static void domain_flush_devices(struct protection_domain *domain)
 	spin_lock_irqsave(&domain->lock, flags);

 	list_for_each_entry(dev_data, &domain->dev_list, list)
-		device_flush_dte(dev_data->dev);
+		device_flush_dte(dev_data);

 	spin_unlock_irqrestore(&domain->lock, flags);
 }
@@ -1527,44 +1592,33 @@ static void clear_dte_entry(u16 devid)
 	amd_iommu_apply_erratum_63(devid);
 }

-static void do_attach(struct device *dev, struct protection_domain *domain)
+static void do_attach(struct iommu_dev_data *dev_data,
+		      struct protection_domain *domain)
 {
-	struct iommu_dev_data *dev_data;
 	struct amd_iommu *iommu;
-	struct pci_dev *pdev;
-	bool ats = false;
-	u16 devid;
-
-	devid = get_device_id(dev);
-	iommu = amd_iommu_rlookup_table[devid];
-	dev_data = get_dev_data(dev);
-	pdev = to_pci_dev(dev);
+	bool ats;

-	if (amd_iommu_iotlb_sup)
-		ats = pci_ats_enabled(pdev);
+	iommu = amd_iommu_rlookup_table[dev_data->devid];
+	ats   = dev_data->ats.enabled;

 	/* Update data structures */
 	dev_data->domain = domain;
 	list_add(&dev_data->list, &domain->dev_list);
-	set_dte_entry(devid, domain, ats);
+	set_dte_entry(dev_data->devid, domain, ats);

 	/* Do reference counting */
 	domain->dev_iommu[iommu->index] += 1;
 	domain->dev_cnt += 1;

 	/* Flush the DTE entry */
-	device_flush_dte(dev);
+	device_flush_dte(dev_data);
 }

-static void do_detach(struct device *dev)
+static void do_detach(struct iommu_dev_data *dev_data)
 {
-	struct iommu_dev_data *dev_data;
 	struct amd_iommu *iommu;
-	u16 devid;

-	devid = get_device_id(dev);
-	iommu = amd_iommu_rlookup_table[devid];
-	dev_data = get_dev_data(dev);
+	iommu = amd_iommu_rlookup_table[dev_data->devid];

 	/* decrease reference counters */
 	dev_data->domain->dev_iommu[iommu->index] -= 1;
@@ -1573,52 +1627,46 @@ static void do_detach(struct device *dev)
 	/* Update data structures */
 	dev_data->domain = NULL;
 	list_del(&dev_data->list);
-	clear_dte_entry(devid);
+	clear_dte_entry(dev_data->devid);

 	/* Flush the DTE entry */
-	device_flush_dte(dev);
+	device_flush_dte(dev_data);
 }

 /*
  * If a device is not yet associated with a domain, this function does
  * assigns it visible for the hardware
  */
-static int __attach_device(struct device *dev,
+static int __attach_device(struct iommu_dev_data *dev_data,
 			   struct protection_domain *domain)
 {
-	struct iommu_dev_data *dev_data, *alias_data;
 	int ret;

-	dev_data = get_dev_data(dev);
-	alias_data = get_dev_data(dev_data->alias);
-
-	if (!alias_data)
-		return -EINVAL;
-
 	/* lock domain */
 	spin_lock(&domain->lock);

-	/* Some sanity checks */
-	ret = -EBUSY;
-	if (alias_data->domain != NULL &&
-	    alias_data->domain != domain)
-		goto out_unlock;
+	if (dev_data->alias_data != NULL) {
+		struct iommu_dev_data *alias_data = dev_data->alias_data;
+
+		/* Some sanity checks */
+		ret = -EBUSY;
+		if (alias_data->domain != NULL &&
+		    alias_data->domain != domain)
+			goto out_unlock;

-	if (dev_data->domain != NULL &&
-	    dev_data->domain != domain)
-		goto out_unlock;
+		if (dev_data->domain != NULL &&
+		    dev_data->domain != domain)
+			goto out_unlock;

-	/* Do real assignment */
-	if (dev_data->alias != dev) {
-		alias_data = get_dev_data(dev_data->alias);
+		/* Do real assignment */
 		if (alias_data->domain == NULL)
-			do_attach(dev_data->alias, domain);
+			do_attach(alias_data, domain);

 		atomic_inc(&alias_data->bind);
 	}

 	if (dev_data->domain == NULL)
-		do_attach(dev, domain);
+		do_attach(dev_data, domain);

 	atomic_inc(&dev_data->bind);

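The bind counters above implement a small refcount protocol between a device and its PCI alias: every __attach_device() increments both counters, and __detach_device() (below) only really detaches a dev_data once its counter drops to zero, so an alias shared by several functions stays attached while any of them still needs it. A compact userspace model of just that invariant (illustrative; only the functions named in the comments come from the patch):

#include <assert.h>

struct dd { int bind; int attached; };

static void attach(struct dd *dev, struct dd *alias)
{
	if (alias) {
		if (!alias->attached)
			alias->attached = 1;	/* do_attach(alias_data, ...) */
		alias->bind++;
	}
	if (!dev->attached)
		dev->attached = 1;		/* do_attach(dev_data, ...) */
	dev->bind++;
}

static void detach(struct dd *dev, struct dd *alias)
{
	if (alias && --alias->bind == 0)
		alias->attached = 0;		/* do_detach(alias_data) */
	if (--dev->bind == 0)
		dev->attached = 0;		/* do_detach(dev_data) */
}

int main(void)
{
	struct dd alias = {0}, devA = {0}, devB = {0};

	attach(&devA, &alias);
	attach(&devB, &alias);	/* two functions behind one alias */
	detach(&devA, &alias);
	assert(alias.attached);	/* alias stays bound while devB needs it */
	detach(&devB, &alias);
	assert(!alias.attached);
	return 0;
}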
@@ -1640,14 +1688,19 @@ static int attach_device(struct device *dev,
 			 struct protection_domain *domain)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
+	struct iommu_dev_data *dev_data;
 	unsigned long flags;
 	int ret;

-	if (amd_iommu_iotlb_sup)
-		pci_enable_ats(pdev, PAGE_SHIFT);
+	dev_data = get_dev_data(dev);
+
+	if (amd_iommu_iotlb_sup && pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
+		dev_data->ats.enabled = true;
+		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
+	}

 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	ret = __attach_device(dev, domain);
+	ret = __attach_device(dev_data, domain);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

 	/*
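attach_device() now checks the return value of pci_enable_ats() (the old code ignored it) and caches the queue depth once, at enable time, instead of re-querying PCI config space on every flush. The shape of that pattern, sketched with stubbed stand-ins for the PCI calls (0-on-success matches the real convention; everything else is made up for the sketch):

#include <stdbool.h>
#include <stdio.h>

struct ats_state {
	bool enabled;
	int  qdep;
};

/* Stand-ins for pci_enable_ats()/pci_ats_queue_depth(). */
static int stub_pci_enable_ats(void)      { return 0; }
static int stub_pci_ats_queue_depth(void) { return 32; }

static void enable_ats(struct ats_state *ats, bool iommu_has_iotlb_sup)
{
	/* Record ATS state only if the IOMMU supports IOTLB flushing
	 * AND the device actually enabled ATS. */
	if (iommu_has_iotlb_sup && stub_pci_enable_ats() == 0) {
		ats->enabled = true;
		ats->qdep    = stub_pci_ats_queue_depth();
	}
}

int main(void)
{
	struct ats_state ats = { 0 };

	enable_ats(&ats, true);
	printf("enabled=%d qdep=%d\n", ats.enabled, ats.qdep);
	return 0;
}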
@@ -1663,10 +1716,8 @@ static int attach_device(struct device *dev,
 /*
  * Removes a device from a protection domain (unlocked)
  */
-static void __detach_device(struct device *dev)
+static void __detach_device(struct iommu_dev_data *dev_data)
 {
-	struct iommu_dev_data *dev_data = get_dev_data(dev);
-	struct iommu_dev_data *alias_data;
 	struct protection_domain *domain;
 	unsigned long flags;

@@ -1676,14 +1727,15 @@ static void __detach_device(struct device *dev)

 	spin_lock_irqsave(&domain->lock, flags);

-	if (dev_data->alias != dev) {
-		alias_data = get_dev_data(dev_data->alias);
+	if (dev_data->alias_data != NULL) {
+		struct iommu_dev_data *alias_data = dev_data->alias_data;
+
 		if (atomic_dec_and_test(&alias_data->bind))
-			do_detach(dev_data->alias);
+			do_detach(alias_data);
 	}

 	if (atomic_dec_and_test(&dev_data->bind))
-		do_detach(dev);
+		do_detach(dev_data);

 	spin_unlock_irqrestore(&domain->lock, flags);

@@ -1694,7 +1746,7 @@ static void __detach_device(struct device *dev)
 	 */
 	if (iommu_pass_through &&
 	    (dev_data->domain == NULL && domain != pt_domain))
-		__attach_device(dev, pt_domain);
+		__attach_device(dev_data, pt_domain);
 }

 /*
@@ -1702,16 +1754,20 @@ static void __detach_device(struct device *dev)
  */
 static void detach_device(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
+	struct iommu_dev_data *dev_data;
 	unsigned long flags;

+	dev_data = get_dev_data(dev);
+
 	/* lock device table */
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	__detach_device(dev);
+	__detach_device(dev_data);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);

-	if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev))
-		pci_disable_ats(pdev);
+	if (dev_data->ats.enabled) {
+		pci_disable_ats(to_pci_dev(dev));
+		dev_data->ats.enabled = false;
+	}
 }

 /*
@@ -1720,26 +1776,25 @@ static void detach_device(struct device *dev)
  */
 static struct protection_domain *domain_for_device(struct device *dev)
 {
-	struct protection_domain *dom;
-	struct iommu_dev_data *dev_data, *alias_data;
+	struct iommu_dev_data *dev_data;
+	struct protection_domain *dom = NULL;
 	unsigned long flags;
-	u16 devid;

-	devid = get_device_id(dev);
 	dev_data = get_dev_data(dev);
-	alias_data = get_dev_data(dev_data->alias);
-	if (!alias_data)
-		return NULL;

-	read_lock_irqsave(&amd_iommu_devtable_lock, flags);
-	dom = dev_data->domain;
-	if (dom == NULL &&
-	    alias_data->domain != NULL) {
-		__attach_device(dev, alias_data->domain);
-		dom = alias_data->domain;
-	}
+	if (dev_data->domain)
+		return dev_data->domain;

-	read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	if (dev_data->alias_data != NULL) {
+		struct iommu_dev_data *alias_data = dev_data->alias_data;
+
+		read_lock_irqsave(&amd_iommu_devtable_lock, flags);
+		if (alias_data->domain != NULL) {
+			__attach_device(dev_data, alias_data->domain);
+			dom = alias_data->domain;
+		}
+		read_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+	}

 	return dom;
 }
@@ -1799,7 +1854,6 @@ static int device_change_notifier(struct notifier_block *nb,
 		goto out;
 	}

-	device_flush_dte(dev);
 	iommu_completion_wait(iommu);

 out:
@@ -1859,11 +1913,8 @@ static void update_device_table(struct protection_domain *domain)
 {
 	struct iommu_dev_data *dev_data;

-	list_for_each_entry(dev_data, &domain->dev_list, list) {
-		struct pci_dev *pdev = to_pci_dev(dev_data->dev);
-		u16 devid = get_device_id(dev_data->dev);
-		set_dte_entry(devid, domain, pci_ats_enabled(pdev));
-	}
+	list_for_each_entry(dev_data, &domain->dev_list, list)
+		set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled);
 }

 static void update_domain(struct protection_domain *domain)
@@ -2498,9 +2549,7 @@ static void cleanup_domain(struct protection_domain *domain)
 	write_lock_irqsave(&amd_iommu_devtable_lock, flags);

 	list_for_each_entry_safe(dev_data, next, &domain->dev_list, list) {
-		struct device *dev = dev_data->dev;
-
-		__detach_device(dev);
+		__detach_device(dev_data);
 		atomic_set(&dev_data->bind, 0);
 	}

@@ -2606,7 +2655,6 @@ static void amd_iommu_detach_device(struct iommu_domain *dom,
 	if (!iommu)
 		return;

-	device_flush_dte(dev);
 	iommu_completion_wait(iommu);
 }

@@ -2617,16 +2665,13 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 	struct iommu_dev_data *dev_data;
 	struct amd_iommu *iommu;
 	int ret;
-	u16 devid;

 	if (!check_device(dev))
 		return -EINVAL;

 	dev_data = dev->archdata.iommu;

-	devid = get_device_id(dev);
-
-	iommu = amd_iommu_rlookup_table[devid];
+	iommu = amd_iommu_rlookup_table[dev_data->devid];
 	if (!iommu)
 		return -EINVAL;

diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 4c9982995414..5b9c5075e81a 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -310,10 +310,15 @@ struct protection_domain {
  */
 struct iommu_dev_data {
 	struct list_head list;            /* For domain->dev_list */
-	struct device *dev;               /* Device this data belong to */
-	struct device *alias;             /* The Alias Device */
+	struct list_head dev_data_list;   /* For global dev_data_list */
+	struct iommu_dev_data *alias_data;/* The alias dev_data */
 	struct protection_domain *domain; /* Domain the device is bound to */
 	atomic_t bind;                    /* Domain attach reverent count */
+	u16 devid;                        /* PCI Device ID */
+	struct {
+		bool enabled;
+		int qdep;
+	} ats;                            /* ATS state */
 };

 /*
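With these fields in place, everything a flush or DTE update needs is reachable from the iommu_dev_data itself, and the struct device pointer plus per-call PCI queries disappear from the hot paths. A hypothetical kernel-style consumer, only to show which new field replaces which old call (not a function from the patch):

static void show_flush_inputs(struct iommu_dev_data *dev_data)
{
	u16 devid = dev_data->devid;         /* was: get_device_id(dev) */
	bool ats  = dev_data->ats.enabled;   /* was: pci_ats_enabled(pdev) */
	int  qdep = dev_data->ats.qdep;      /* was: pci_ats_queue_depth(pdev) */

	pr_debug("devid=%#x ats=%d qdep=%d\n", devid, ats, qdep);
}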