author	Suresh Siddha <suresh.b.siddha@intel.com>	2008-07-10 14:16:36 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-12 02:44:47 -0400
commit	c42d9f32443397aed2d37d37df161392e6a5862f (patch)
tree	564126849bb2e31d2cfb719c3b03457a597733d2 /drivers/pci/intel-iommu.c
parent	e61d98d8dad0048619bb138b0ff996422ffae53b (diff)
x64, x2apic/intr-remap: fix the need for sequential array allocation of iommus
Clean up the intel-iommu code related to the deferred iommu flush logic. There is
no need to allocate all the iommus as a sequential array.
This will be used later in the interrupt-remapping patch series to
allocate the iommus much earlier, individually for each device remapping
hardware unit.
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: akpm@linux-foundation.org
Cc: arjan@linux.intel.com
Cc: andi@firstfloor.org
Cc: ebiederm@xmission.com
Cc: jbarnes@virtuousgeek.org
Cc: steiner@sgi.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
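Editor's note: the core of the change is how per-iommu bookkeeping (such as the deferred_flush tables) is indexed. Instead of deriving the index by pointer arithmetic against a contiguous g_iommus array, each unit carries a seq_id assigned when it is allocated, so the units no longer have to live in one sequential allocation. The standalone C sketch below illustrates that idea only; it is not kernel code, and every name in it except the seq_id field (fake_iommu, alloc_unit, units, pending, NR_UNITS) is hypothetical.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct intel_iommu. */
struct fake_iommu {
	int seq_id;		/* slot in the per-unit bookkeeping tables */
};

#define NR_UNITS 4

static struct fake_iommu *units[NR_UNITS];	/* one allocation per unit */
static int pending[NR_UNITS];			/* e.g. deferred-flush entries */

/* Allocate a unit on its own and record which bookkeeping slot it owns. */
static struct fake_iommu *alloc_unit(int seq_id)
{
	struct fake_iommu *u = calloc(1, sizeof(*u));

	if (u)
		u->seq_id = seq_id;
	return u;
}

int main(void)
{
	int i;

	/* Each unit is an independent allocation; no contiguous array needed. */
	for (i = 0; i < NR_UNITS; i++) {
		units[i] = alloc_unit(i);
		if (!units[i])
			return 1;
	}

	/* Bookkeeping is keyed by seq_id, not by (unit - base) arithmetic. */
	pending[units[2]->seq_id]++;

	for (i = 0; i < NR_UNITS; i++) {
		printf("unit %d: pending=%d\n", units[i]->seq_id, pending[i]);
		free(units[i]);
	}
	return 0;
}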
Diffstat (limited to 'drivers/pci/intel-iommu.c')
-rw-r--r--	drivers/pci/intel-iommu.c	24
1 file changed, 7 insertions(+), 17 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 1c0270d3e2e..4d59a6a1f4d 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -58,8 +58,6 @@ static void flush_unmaps_timeout(unsigned long data);
 
 DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
 
-static struct intel_iommu *g_iommus;
-
 #define HIGH_WATER_MARK 250
 struct deferred_flush_tables {
 	int next;
@@ -1649,8 +1647,6 @@ int __init init_dmars(void)
 	 * endfor
 	 */
 	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
 		g_num_of_iommus++;
 		/*
 		 * lock not needed as this is only incremented in the single
@@ -1659,26 +1655,17 @@ int __init init_dmars(void)
 	 */
 	}
 
-	g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL);
-	if (!g_iommus) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
 	deferred_flush = kzalloc(g_num_of_iommus *
 		sizeof(struct deferred_flush_tables), GFP_KERNEL);
 	if (!deferred_flush) {
-		kfree(g_iommus);
 		ret = -ENOMEM;
 		goto error;
 	}
 
-	i = 0;
 	for_each_drhd_unit(drhd) {
 		if (drhd->ignored)
 			continue;
-		iommu = alloc_iommu(&g_iommus[i], drhd);
-		i++;
+		iommu = alloc_iommu(drhd);
 		if (!iommu) {
 			ret = -ENOMEM;
 			goto error;
@@ -1770,7 +1757,6 @@ error:
 		iommu = drhd->iommu;
 		free_iommu(iommu);
 	}
-	kfree(g_iommus);
 	return ret;
 }
 
@@ -1927,7 +1913,10 @@ static void flush_unmaps(void)
 	/* just flush them all */
 	for (i = 0; i < g_num_of_iommus; i++) {
 		if (deferred_flush[i].next) {
-			iommu_flush_iotlb_global(&g_iommus[i], 0);
+			struct intel_iommu *iommu =
+				deferred_flush[i].domain[0]->iommu;
+
+			iommu_flush_iotlb_global(iommu, 0);
 			for (j = 0; j < deferred_flush[i].next; j++) {
 				__free_iova(&deferred_flush[i].domain[j]->iovad,
 					deferred_flush[i].iova[j]);
@@ -1957,7 +1946,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
 	if (list_size == HIGH_WATER_MARK)
 		flush_unmaps();
 
-	iommu_id = dom->iommu - g_iommus;
+	iommu_id = dom->iommu->seq_id;
+
 	next = deferred_flush[iommu_id].next;
 	deferred_flush[iommu_id].domain[next] = dom;
 	deferred_flush[iommu_id].iova[next] = iova;