Diffstat (limited to 'drivers/pci/intel-iommu.c')
 drivers/pci/intel-iommu.c | 151 ++++++++++++++++++++++++++++++++++++++------
 1 file changed, 133 insertions(+), 18 deletions(-)
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 4cb949f0ebd9..301c68fab03b 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -22,6 +22,7 @@
 
 #include <linux/init.h>
 #include <linux/bitmap.h>
+#include <linux/debugfs.h>
 #include <linux/slab.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
@@ -31,6 +32,7 @@
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
 #include <linux/mempool.h>
+#include <linux/timer.h>
 #include "iova.h"
 #include "intel-iommu.h"
 #include <asm/proto.h> /* force_iommu in this header in x86-64*/
@@ -51,11 +53,37 @@
 
 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
 
+
+static void flush_unmaps_timeout(unsigned long data);
+
+DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
+
+static struct intel_iommu *g_iommus;
+
+#define HIGH_WATER_MARK 250
+struct deferred_flush_tables {
+        int next;
+        struct iova *iova[HIGH_WATER_MARK];
+        struct dmar_domain *domain[HIGH_WATER_MARK];
+};
+
+static struct deferred_flush_tables *deferred_flush;
+
+/* bitmap for indexing intel_iommus */
+static int g_num_of_iommus;
+
+static DEFINE_SPINLOCK(async_umap_flush_lock);
+static LIST_HEAD(unmaps_to_do);
+
+static int timer_on;
+static long list_size;
+
 static void domain_remove_dev_info(struct dmar_domain *domain);
 
 static int dmar_disabled;
 static int __initdata dmar_map_gfx = 1;
 static int dmar_forcedac;
+static int intel_iommu_strict;
 
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
 static DEFINE_SPINLOCK(device_domain_lock);
@@ -74,9 +102,13 @@ static int __init intel_iommu_setup(char *str)
                 printk(KERN_INFO
                         "Intel-IOMMU: disable GFX device mapping\n");
         } else if (!strncmp(str, "forcedac", 8)) {
-                printk (KERN_INFO
+                printk(KERN_INFO
                         "Intel-IOMMU: Forcing DAC for PCI devices\n");
                 dmar_forcedac = 1;
+        } else if (!strncmp(str, "strict", 6)) {
+                printk(KERN_INFO
+                        "Intel-IOMMU: disable batched IOTLB flush\n");
+                intel_iommu_strict = 1;
         }
 
         str += strcspn(str, ",");
@@ -966,17 +998,13 @@ static int iommu_init_domains(struct intel_iommu *iommu)
         set_bit(0, iommu->domain_ids);
         return 0;
 }
-
-static struct intel_iommu *alloc_iommu(struct dmar_drhd_unit *drhd)
+static struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
+                                        struct dmar_drhd_unit *drhd)
 {
-        struct intel_iommu *iommu;
         int ret;
         int map_size;
         u32 ver;
 
-        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
-        if (!iommu)
-                return NULL;
         iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
         if (!iommu->reg) {
                 printk(KERN_ERR "IOMMU: can't map the region\n");
@@ -1404,7 +1432,7 @@ static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
         int index;
 
         while (dev) {
-                for (index = 0; index < cnt; index ++)
+                for (index = 0; index < cnt; index++)
                         if (dev == devices[index])
                                 return 1;
 
@@ -1669,7 +1697,7 @@ int __init init_dmars(void)
         struct dmar_rmrr_unit *rmrr;
         struct pci_dev *pdev;
         struct intel_iommu *iommu;
-        int ret, unit = 0;
+        int i, ret, unit = 0;
 
         /*
          * for each drhd
@@ -1680,7 +1708,34 @@ int __init init_dmars(void)
         for_each_drhd_unit(drhd) {
                 if (drhd->ignored)
                         continue;
-                iommu = alloc_iommu(drhd);
+                g_num_of_iommus++;
+                /*
+                 * lock not needed as this is only incremented in the single
+                 * threaded kernel __init code path all other access are read
+                 * only
+                 */
+        }
+
+        g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL);
+        if (!g_iommus) {
+                ret = -ENOMEM;
+                goto error;
+        }
+
+        deferred_flush = kzalloc(g_num_of_iommus *
+                sizeof(struct deferred_flush_tables), GFP_KERNEL);
+        if (!deferred_flush) {
+                kfree(g_iommus);
+                ret = -ENOMEM;
+                goto error;
+        }
+
+        i = 0;
+        for_each_drhd_unit(drhd) {
+                if (drhd->ignored)
+                        continue;
+                iommu = alloc_iommu(&g_iommus[i], drhd);
+                i++;
                 if (!iommu) {
                         ret = -ENOMEM;
                         goto error;
@@ -1713,7 +1768,6 @@ int __init init_dmars(void)
          * endfor
          */
         for_each_rmrr_units(rmrr) {
-                int i;
                 for (i = 0; i < rmrr->devices_cnt; i++) {
                         pdev = rmrr->devices[i];
                         /* some BIOS lists non-exist devices in DMAR table */
@@ -1769,6 +1823,7 @@ error:
                 iommu = drhd->iommu;
                 free_iommu(iommu);
         }
+        kfree(g_iommus);
         return ret;
 }
 
@@ -1917,6 +1972,59 @@ error:
         return 0;
 }
 
+static void flush_unmaps(void)
+{
+        int i, j;
+
+        timer_on = 0;
+
+        /* just flush them all */
+        for (i = 0; i < g_num_of_iommus; i++) {
+                if (deferred_flush[i].next) {
+                        iommu_flush_iotlb_global(&g_iommus[i], 0);
+                        for (j = 0; j < deferred_flush[i].next; j++) {
+                                __free_iova(&deferred_flush[i].domain[j]->iovad,
+                                                deferred_flush[i].iova[j]);
+                        }
+                        deferred_flush[i].next = 0;
+                }
+        }
+
+        list_size = 0;
+}
+
+static void flush_unmaps_timeout(unsigned long data)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&async_umap_flush_lock, flags);
+        flush_unmaps();
+        spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+}
+
+static void add_unmap(struct dmar_domain *dom, struct iova *iova)
+{
+        unsigned long flags;
+        int next, iommu_id;
+
+        spin_lock_irqsave(&async_umap_flush_lock, flags);
+        if (list_size == HIGH_WATER_MARK)
+                flush_unmaps();
+
+        iommu_id = dom->iommu - g_iommus;
+        next = deferred_flush[iommu_id].next;
+        deferred_flush[iommu_id].domain[next] = dom;
+        deferred_flush[iommu_id].iova[next] = iova;
+        deferred_flush[iommu_id].next++;
+
+        if (!timer_on) {
+                mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
+                timer_on = 1;
+        }
+        list_size++;
+        spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+}
+
 static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, int dir)
 {
@@ -1944,13 +2052,19 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr,
         dma_pte_clear_range(domain, start_addr, start_addr + size);
         /* free page tables */
         dma_pte_free_pagetable(domain, start_addr, start_addr + size);
-
-        if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
-                        size >> PAGE_SHIFT_4K, 0))
-                iommu_flush_write_buffer(domain->iommu);
-
-        /* free iova */
-        __free_iova(&domain->iovad, iova);
+        if (intel_iommu_strict) {
+                if (iommu_flush_iotlb_psi(domain->iommu,
+                        domain->id, start_addr, size >> PAGE_SHIFT_4K, 0))
+                        iommu_flush_write_buffer(domain->iommu);
+                /* free iova */
+                __free_iova(&domain->iovad, iova);
+        } else {
+                add_unmap(domain, iova);
+                /*
+                 * queue up the release of the unmap to save the 1/6th of the
+                 * cpu used up by the iotlb flush operation...
+                 */
+        }
 }
 
 static void * intel_alloc_coherent(struct device *hwdev, size_t size,
@@ -2289,6 +2403,7 @@ int __init intel_iommu_init(void)
         printk(KERN_INFO
         "PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
 
+        init_timer(&unmap_timer);
         force_iommu = 1;
         dma_ops = &intel_dma_ops;
         return 0;
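
The scheme above amortizes one global IOTLB flush per IOMMU over up to HIGH_WATER_MARK (250) deferred unmaps, flushing early only when the shared queue fills or the 10 ms unmap_timer fires. What follows is a minimal, self-contained userspace sketch of that batching pattern, for illustration only: the names mirror the patch, but the timer is simulated by a final explicit flush, locking is omitted, and each iova is reduced to an integer.

/*
 * Illustrative sketch of the deferred-flush batching pattern
 * (not the kernel code). Build with: cc -std=c99 sketch.c
 */
#include <stdio.h>

#define HIGH_WATER_MARK 250
#define NUM_IOMMUS 2

/* stand-in for the patch's per-IOMMU struct deferred_flush_tables */
struct deferred_table {
        int next;                       /* entries queued so far */
        int iova[HIGH_WATER_MARK];      /* stand-in for struct iova * */
};

static struct deferred_table deferred[NUM_IOMMUS];
static long list_size;                  /* total queued across all IOMMUs */

/* One global IOTLB flush per IOMMU replaces one per-range flush per unmap. */
static void flush_unmaps(void)
{
        for (int i = 0; i < NUM_IOMMUS; i++) {
                if (!deferred[i].next)
                        continue;
                printf("iommu %d: global IOTLB flush, releasing %d iovas\n",
                       i, deferred[i].next);
                deferred[i].next = 0;   /* iovas would be __free_iova()d here */
        }
        list_size = 0;
}

/* Queue one unmap; flush synchronously only when the batch is full. */
static void add_unmap(int iommu_id, int iova)
{
        if (list_size == HIGH_WATER_MARK)
                flush_unmaps();

        struct deferred_table *t = &deferred[iommu_id];
        t->iova[t->next++] = iova;
        list_size++;
        /* the kernel also arms a 10 ms timer here if none is pending */
}

int main(void)
{
        for (int n = 0; n < 600; n++)
                add_unmap(n % NUM_IOMMUS, n);
        flush_unmaps();                 /* what unmap_timer's handler does */
        return 0;
}

The design trade-off, per the patch's own comment in intel_unmap_single(), is throughput for latency: deferring the flush saves roughly 1/6th of the CPU consumed by per-unmap IOTLB flushes, while the "intel_iommu=strict" boot option restores the synchronous behavior for stricter isolation.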