Diffstat (limited to 'drivers')
 drivers/base/Makefile     |   1
 drivers/base/iommu.c      | 100
 drivers/pci/dmar.c        |  46
 drivers/pci/intel-iommu.c | 944
 4 files changed, 947 insertions(+), 144 deletions(-)
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index c66637392bbc..b5b8ba512b28 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_FW_LOADER) += firmware_class.o
 obj-$(CONFIG_NUMA) += node.o
 obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
 obj-$(CONFIG_SMP) += topology.o
+obj-$(CONFIG_IOMMU_API) += iommu.o
 ifeq ($(CONFIG_SYSFS),y)
 obj-$(CONFIG_MODULES) += module.o
 endif
diff --git a/drivers/base/iommu.c b/drivers/base/iommu.c
new file mode 100644
index 000000000000..5e039d4f877c
--- /dev/null
+++ b/drivers/base/iommu.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/bug.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/iommu.h>
+
+static struct iommu_ops *iommu_ops;
+
+void register_iommu(struct iommu_ops *ops)
+{
+        if (iommu_ops)
+                BUG();
+
+        iommu_ops = ops;
+}
+
+bool iommu_found(void)
+{
+        return iommu_ops != NULL;
+}
+EXPORT_SYMBOL_GPL(iommu_found);
+
+struct iommu_domain *iommu_domain_alloc(void)
+{
+        struct iommu_domain *domain;
+        int ret;
+
+        domain = kmalloc(sizeof(*domain), GFP_KERNEL);
+        if (!domain)
+                return NULL;
+
+        ret = iommu_ops->domain_init(domain);
+        if (ret)
+                goto out_free;
+
+        return domain;
+
+out_free:
+        kfree(domain);
+
+        return NULL;
+}
+EXPORT_SYMBOL_GPL(iommu_domain_alloc);
+
+void iommu_domain_free(struct iommu_domain *domain)
+{
+        iommu_ops->domain_destroy(domain);
+        kfree(domain);
+}
+EXPORT_SYMBOL_GPL(iommu_domain_free);
+
+int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
+{
+        return iommu_ops->attach_dev(domain, dev);
+}
+EXPORT_SYMBOL_GPL(iommu_attach_device);
+
+void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
+{
+        iommu_ops->detach_dev(domain, dev);
+}
+EXPORT_SYMBOL_GPL(iommu_detach_device);
+
+int iommu_map_range(struct iommu_domain *domain, unsigned long iova,
+                    phys_addr_t paddr, size_t size, int prot)
+{
+        return iommu_ops->map(domain, iova, paddr, size, prot);
+}
+EXPORT_SYMBOL_GPL(iommu_map_range);
+
+void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
+                       size_t size)
+{
+        iommu_ops->unmap(domain, iova, size);
+}
+EXPORT_SYMBOL_GPL(iommu_unmap_range);
+
+phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
+                               unsigned long iova)
+{
+        return iommu_ops->iova_to_phys(domain, iova);
+}
+EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
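
The new file above is the whole of the generic API: one registered iommu_ops and thin wrappers around it. As orientation, here is a minimal sketch of how a consumer such as a device-assignment path might drive it. The IOMMU_READ/IOMMU_WRITE prot flags are assumed to come from the matching <linux/iommu.h> header, which lies outside this drivers/-only diffstat, and error handling is trimmed to the essentials; this is illustrative, not part of the patch.

/*
 * Illustrative consumer of the new API; example_assign_device() is a
 * hypothetical helper, not something this patch adds.
 */
#include <linux/iommu.h>
#include <linux/pci.h>

static int example_assign_device(struct pci_dev *pdev,
                                 phys_addr_t host_page, unsigned long iova)
{
        struct iommu_domain *domain;
        int ret;

        if (!iommu_found())
                return -ENODEV;

        domain = iommu_domain_alloc();
        if (!domain)
                return -ENOMEM;

        ret = iommu_attach_device(domain, &pdev->dev);
        if (ret)
                goto out_free;

        /* map one page, read/write, at the given IO virtual address */
        ret = iommu_map_range(domain, iova, host_page, PAGE_SIZE,
                              IOMMU_READ | IOMMU_WRITE);
        if (ret)
                goto out_detach;

        return 0;

out_detach:
        iommu_detach_device(domain, &pdev->dev);
out_free:
        iommu_domain_free(domain);
        return ret;
}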
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index 691b3adeb870..f5a662a50acb 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -191,26 +191,17 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
 {
         struct acpi_dmar_hardware_unit *drhd;
-        static int include_all;
         int ret = 0;
 
         drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
 
-        if (!dmaru->include_all)
-                ret = dmar_parse_dev_scope((void *)(drhd + 1),
+        if (dmaru->include_all)
+                return 0;
+
+        ret = dmar_parse_dev_scope((void *)(drhd + 1),
                         ((void *)drhd) + drhd->header.length,
                         &dmaru->devices_cnt, &dmaru->devices,
                         drhd->segment);
-        else {
-                /* Only allow one INCLUDE_ALL */
-                if (include_all) {
-                        printk(KERN_WARNING PREFIX "Only one INCLUDE_ALL "
-                                "device scope is allowed\n");
-                        ret = -EINVAL;
-                }
-                include_all = 1;
-        }
-
         if (ret) {
                 list_del(&dmaru->list);
                 kfree(dmaru);
@@ -384,12 +375,21 @@ int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
 struct dmar_drhd_unit *
 dmar_find_matched_drhd_unit(struct pci_dev *dev)
 {
-        struct dmar_drhd_unit *drhd = NULL;
+        struct dmar_drhd_unit *dmaru = NULL;
+        struct acpi_dmar_hardware_unit *drhd;
 
-        list_for_each_entry(drhd, &dmar_drhd_units, list) {
-                if (drhd->include_all || dmar_pci_device_match(drhd->devices,
-                                                drhd->devices_cnt, dev))
-                        return drhd;
+        list_for_each_entry(dmaru, &dmar_drhd_units, list) {
+                drhd = container_of(dmaru->hdr,
+                                    struct acpi_dmar_hardware_unit,
+                                    header);
+
+                if (dmaru->include_all &&
+                    drhd->segment == pci_domain_nr(dev->bus))
+                        return dmaru;
+
+                if (dmar_pci_device_match(dmaru->devices,
+                                          dmaru->devices_cnt, dev))
+                        return dmaru;
         }
 
         return NULL;
@@ -491,6 +491,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
         int map_size;
         u32 ver;
         static int iommu_allocated = 0;
+        int agaw;
 
         iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
         if (!iommu)
@@ -506,6 +507,15 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
         iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
         iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+        agaw = iommu_calculate_agaw(iommu);
+        if (agaw < 0) {
+                printk(KERN_ERR
+                       "Cannot get a valid agaw for iommu (seq_id = %d)\n",
+                       iommu->seq_id);
+                goto error;
+        }
+        iommu->agaw = agaw;
+
         /* the registers might be more than one page */
         map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                          cap_max_fault_reg_offset(iommu->cap));
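
alloc_iommu() now fails early when iommu_calculate_agaw() (added to intel-iommu.c below) cannot find a supported adjusted guest address width. The arithmetic behind agaw values is compact enough to show standalone. This sketch assumes the layout implied by the patch (agaw 0 is a 2-level, 30-bit table and each extra level adds 9 bits); width_to_agaw() itself is defined elsewhere in intel-iommu.c and is not part of this diff.

/* Standalone sketch of the width <-> agaw arithmetic; assumptions noted above. */
#include <stdio.h>

#define AGAW_WIDTH_BASE 30      /* agaw 0 covers 30 address bits */
#define LEVEL_STRIDE    9       /* each extra page-table level adds 9 bits */

static int width_to_agaw_sketch(int width)
{
        return (width - AGAW_WIDTH_BASE) / LEVEL_STRIDE;
}

int main(void)
{
        int width;

        /* 30-, 39-, 48-, 57- and 66-bit widths map to agaw 0..4 */
        for (width = AGAW_WIDTH_BASE; width <= 66; width += LEVEL_STRIDE)
                printf("width %2d -> agaw %d (%d levels)\n", width,
                       width_to_agaw_sketch(width),
                       width_to_agaw_sketch(width) + 2);
        return 0;
}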
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 5c8baa43ac9c..235fb7a5a8a5 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -27,7 +27,6 @@
 #include <linux/slab.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
-#include <linux/sysdev.h>
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include <linux/dmar.h>
@@ -35,6 +34,7 @@
 #include <linux/mempool.h>
 #include <linux/timer.h>
 #include <linux/iova.h>
+#include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
@@ -54,6 +54,195 @@
 
 #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
 
+#define IOVA_PFN(addr)          ((addr) >> PAGE_SHIFT)
+#define DMA_32BIT_PFN           IOVA_PFN(DMA_32BIT_MASK)
+#define DMA_64BIT_PFN           IOVA_PFN(DMA_64BIT_MASK)
+
+/* global iommu list, set NULL for ignored DMAR units */
+static struct intel_iommu **g_iommus;
+
+/*
+ * 0: Present
+ * 1-11: Reserved
+ * 12-63: Context Ptr (12 - (haw-1))
+ * 64-127: Reserved
+ */
+struct root_entry {
+        u64     val;
+        u64     rsvd1;
+};
+#define ROOT_ENTRY_NR (VTD_PAGE_SIZE/sizeof(struct root_entry))
+static inline bool root_present(struct root_entry *root)
+{
+        return (root->val & 1);
+}
+static inline void set_root_present(struct root_entry *root)
+{
+        root->val |= 1;
+}
+static inline void set_root_value(struct root_entry *root, unsigned long value)
+{
+        root->val |= value & VTD_PAGE_MASK;
+}
+
+static inline struct context_entry *
+get_context_addr_from_root(struct root_entry *root)
+{
+        return (struct context_entry *)
+                (root_present(root) ? phys_to_virt(root->val & VTD_PAGE_MASK)
+                                    : NULL);
+}
+
+/*
+ * low 64 bits:
+ * 0: present
+ * 1: fault processing disable
+ * 2-3: translation type
+ * 12-63: address space root
+ * high 64 bits:
+ * 0-2: address width
+ * 3-6: aval
+ * 8-23: domain id
+ */
+struct context_entry {
+        u64 lo;
+        u64 hi;
+};
+
+static inline bool context_present(struct context_entry *context)
+{
+        return (context->lo & 1);
+}
+static inline void context_set_present(struct context_entry *context)
+{
+        context->lo |= 1;
+}
+
+static inline void context_set_fault_enable(struct context_entry *context)
+{
+        context->lo &= (((u64)-1) << 2) | 1;
+}
+
+#define CONTEXT_TT_MULTI_LEVEL 0
+
+static inline void context_set_translation_type(struct context_entry *context,
+                                                unsigned long value)
+{
+        context->lo &= (((u64)-1) << 4) | 3;
+        context->lo |= (value & 3) << 2;
+}
+
+static inline void context_set_address_root(struct context_entry *context,
+                                            unsigned long value)
+{
+        context->lo |= value & VTD_PAGE_MASK;
+}
+
+static inline void context_set_address_width(struct context_entry *context,
+                                             unsigned long value)
+{
+        context->hi |= value & 7;
+}
+
+static inline void context_set_domain_id(struct context_entry *context,
+                                         unsigned long value)
+{
+        context->hi |= (value & ((1 << 16) - 1)) << 8;
+}
+
+static inline void context_clear_entry(struct context_entry *context)
+{
+        context->lo = 0;
+        context->hi = 0;
+}
+
+/*
+ * 0: readable
+ * 1: writable
+ * 2-6: reserved
+ * 7: super page
+ * 8-11: available
+ * 12-63: Host physical address
+ */
+struct dma_pte {
+        u64 val;
+};
+
+static inline void dma_clear_pte(struct dma_pte *pte)
+{
+        pte->val = 0;
+}
+
+static inline void dma_set_pte_readable(struct dma_pte *pte)
+{
+        pte->val |= DMA_PTE_READ;
+}
+
+static inline void dma_set_pte_writable(struct dma_pte *pte)
+{
+        pte->val |= DMA_PTE_WRITE;
+}
+
+static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
+{
+        pte->val = (pte->val & ~3) | (prot & 3);
+}
+
+static inline u64 dma_pte_addr(struct dma_pte *pte)
+{
+        return (pte->val & VTD_PAGE_MASK);
+}
+
+static inline void dma_set_pte_addr(struct dma_pte *pte, u64 addr)
+{
+        pte->val |= (addr & VTD_PAGE_MASK);
+}
+
+static inline bool dma_pte_present(struct dma_pte *pte)
+{
+        return (pte->val & 3) != 0;
+}
+
+/* devices under the same p2p bridge are owned in one domain */
+#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
+
+/* domain represents a virtual machine; more than one device
+ * across iommus may be owned by one domain, e.g. kvm guest.
+ */
+#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
+
+struct dmar_domain {
+        int     id;                     /* domain id */
+        unsigned long iommu_bmp;        /* bitmap of iommus this domain uses */
+
+        struct list_head devices;       /* all devices' list */
+        struct iova_domain iovad;       /* iova's that belong to this domain */
+
+        struct dma_pte  *pgd;           /* virtual address */
+        spinlock_t      mapping_lock;   /* page table lock */
+        int             gaw;            /* max guest address width */
+
+        /* adjusted guest address width, 0 is level 2 30-bit */
+        int             agaw;
+
+        int             flags;          /* flags to find out type of domain */
+
+        int             iommu_coherency;/* indicate coherency of iommu access */
+        int             iommu_count;    /* reference count of iommu */
+        spinlock_t      iommu_lock;     /* protect iommu set in domain */
+        u64             max_addr;       /* maximum mapped address */
+};
+
+/* PCI domain-device relationship */
+struct device_domain_info {
+        struct list_head link;  /* link to domain siblings */
+        struct list_head global; /* link to global list */
+        u8 bus;                 /* PCI bus number */
+        u8 devfn;               /* PCI devfn number */
+        struct pci_dev *dev;    /* it's NULL for PCIe-to-PCI bridge */
+        struct dmar_domain *domain; /* pointer to domain */
+};
 
 static void flush_unmaps_timeout(unsigned long data);
 
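
The accessors above replace open-coded bit twiddling on root and context entries. A standalone illustration of how they compose into a complete context entry follows; the domain id, address-width value, and table root are hypothetical, and VTD_PAGE_MASK is assumed to be the 4KB page mask from intel-iommu.h.

/* Userspace demonstration of the context_entry bit packing above. */
#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_MASK           (~0xfffULL)
#define CONTEXT_TT_MULTI_LEVEL  0

struct context_entry { uint64_t lo, hi; };

int main(void)
{
        struct context_entry ctx = { 0, 0 };
        uint64_t pgd_phys = 0x12345000ULL;      /* page-aligned table root */

        ctx.hi |= (42ULL & ((1 << 16) - 1)) << 8;       /* domain id 42 */
        ctx.hi |= 2;                                    /* address width */
        ctx.lo |= pgd_phys & VTD_PAGE_MASK;             /* address space root */
        ctx.lo |= ((uint64_t)CONTEXT_TT_MULTI_LEVEL & 3) << 2;
        ctx.lo &= (((uint64_t)-1) << 2) | 1;            /* enable fault processing */
        ctx.lo |= 1;                                    /* present */

        printf("lo=%#018llx hi=%#018llx\n",
               (unsigned long long)ctx.lo, (unsigned long long)ctx.hi);
        return 0;
}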
@@ -88,6 +277,8 @@ static int intel_iommu_strict;
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
 
+static struct iommu_ops intel_iommu_ops;
+
 static int __init intel_iommu_setup(char *str)
 {
         if (!str)
@@ -184,6 +375,87 @@ void free_iova_mem(struct iova *iova)
         kmem_cache_free(iommu_iova_cache, iova);
 }
 
+
+static inline int width_to_agaw(int width);
+
+/* calculate agaw for each iommu.
+ * "SAGAW" may be different across iommus; use a default agaw, and
+ * fall back to a smaller supported agaw for iommus that don't support it.
+ */
+int iommu_calculate_agaw(struct intel_iommu *iommu)
+{
+        unsigned long sagaw;
+        int agaw = -1;
+
+        sagaw = cap_sagaw(iommu->cap);
+        for (agaw = width_to_agaw(DEFAULT_DOMAIN_ADDRESS_WIDTH);
+             agaw >= 0; agaw--) {
+                if (test_bit(agaw, &sagaw))
+                        break;
+        }
+
+        return agaw;
+}
+
+/* in native case, each domain is related to only one iommu */
+static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
+{
+        int iommu_id;
+
+        BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
+
+        iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+        if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
+                return NULL;
+
+        return g_iommus[iommu_id];
+}
+
+/* "Coherency" capability may be different across iommus */
+static void domain_update_iommu_coherency(struct dmar_domain *domain)
+{
+        int i;
+
+        domain->iommu_coherency = 1;
+
+        i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+        for (; i < g_num_of_iommus; ) {
+                if (!ecap_coherent(g_iommus[i]->ecap)) {
+                        domain->iommu_coherency = 0;
+                        break;
+                }
+                i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
+        }
+}
+
+static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
+{
+        struct dmar_drhd_unit *drhd = NULL;
+        int i;
+
+        for_each_drhd_unit(drhd) {
+                if (drhd->ignored)
+                        continue;
+
+                for (i = 0; i < drhd->devices_cnt; i++)
+                        if (drhd->devices[i]->bus->number == bus &&
+                            drhd->devices[i]->devfn == devfn)
+                                return drhd->iommu;
+
+                if (drhd->include_all)
+                        return drhd->iommu;
+        }
+
+        return NULL;
+}
+
+static void domain_flush_cache(struct dmar_domain *domain,
+                               void *addr, int size)
+{
+        if (!domain->iommu_coherency)
+                clflush_cache_range(addr, size);
+}
+
 /* Gets context entry for a given bus and devfn */
 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
                 u8 bus, u8 devfn)
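
domain_update_iommu_coherency() is why several __iommu_flush_cache() calls below become domain_flush_cache(): once a domain can span IOMMUs, it is coherent only if every IOMMU set in its iommu_bmp is. A userspace model of that reduction, with a hypothetical per-IOMMU coherency table standing in for ecap_coherent():

/* Userspace model of the coherency reduction; MAX_IOMMUS and the table are toy data. */
#include <stdio.h>

#define MAX_IOMMUS 4

static int ecap_coherent_sketch[MAX_IOMMUS] = { 1, 0, 1, 1 };

static int domain_coherency(unsigned long iommu_bmp)
{
        int i;

        for (i = 0; i < MAX_IOMMUS; i++) {
                if (!(iommu_bmp & (1UL << i)))
                        continue;       /* domain does not use iommu i */
                if (!ecap_coherent_sketch[i])
                        return 0;       /* one incoherent iommu taints all */
        }
        return 1;
}

int main(void)
{
        printf("bmp 0x5 -> %d\n", domain_coherency(0x5)); /* iommus 0,2 -> 1 */
        printf("bmp 0x3 -> %d\n", domain_coherency(0x3)); /* iommus 0,1 -> 0 */
        return 0;
}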
@@ -226,7 +498,7 @@ static int device_context_mapped(struct intel_iommu *iommu, u8 bus, u8 devfn)
                 ret = 0;
                 goto out;
         }
-        ret = context_present(context[devfn]);
+        ret = context_present(&context[devfn]);
 out:
         spin_unlock_irqrestore(&iommu->lock, flags);
         return ret;
@@ -242,7 +514,7 @@ static void clear_context_table(struct intel_iommu *iommu, u8 bus, u8 devfn)
         root = &iommu->root_entry[bus];
         context = get_context_addr_from_root(root);
         if (context) {
-                context_clear_entry(context[devfn]);
+                context_clear_entry(&context[devfn]);
                 __iommu_flush_cache(iommu, &context[devfn], \
                         sizeof(*context));
         }
@@ -339,7 +611,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
                 if (level == 1)
                         break;
 
-                if (!dma_pte_present(*pte)) {
+                if (!dma_pte_present(pte)) {
                         tmp_page = alloc_pgtable_page();
 
                         if (!tmp_page) {
@@ -347,18 +619,17 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
                                         flags);
                                 return NULL;
                         }
-                        __iommu_flush_cache(domain->iommu, tmp_page,
-                                PAGE_SIZE);
-                        dma_set_pte_addr(*pte, virt_to_phys(tmp_page));
+                        domain_flush_cache(domain, tmp_page, PAGE_SIZE);
+                        dma_set_pte_addr(pte, virt_to_phys(tmp_page));
                         /*
                          * high level table always sets r/w, last level page
                          * table control read/write
                          */
-                        dma_set_pte_readable(*pte);
-                        dma_set_pte_writable(*pte);
-                        __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+                        dma_set_pte_readable(pte);
+                        dma_set_pte_writable(pte);
+                        domain_flush_cache(domain, pte, sizeof(*pte));
                 }
-                parent = phys_to_virt(dma_pte_addr(*pte));
+                parent = phys_to_virt(dma_pte_addr(pte));
                 level--;
         }
 
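
addr_to_dma_pte() walks, and lazily populates, the multi-level page table. The per-level index math is not visible in this hunk (address_level_offset() and level_size() are defined elsewhere in intel-iommu.c), but under the 9-bits-per-level layout assumed earlier it reduces to this runnable sketch:

/* Sketch of the address decomposition behind the page-table walk; layout assumed as noted. */
#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT  12
#define LEVEL_STRIDE    9
#define LEVEL_MASK      ((1 << LEVEL_STRIDE) - 1)

static unsigned int level_index(uint64_t addr, int level)
{
        return (addr >> (VTD_PAGE_SHIFT + (level - 1) * LEVEL_STRIDE))
                & LEVEL_MASK;
}

int main(void)
{
        uint64_t addr = 0x7f1234567000ULL;
        int level;

        /* a 4-level (agaw 2, 48-bit) walk from the top level down to the PTE */
        for (level = 4; level >= 1; level--)
                printf("level %d index %u\n", level, level_index(addr, level));
        return 0;
}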
@@ -381,9 +652,9 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
                 if (level == total)
                         return pte;
 
-                if (!dma_pte_present(*pte))
+                if (!dma_pte_present(pte))
                         break;
-                parent = phys_to_virt(dma_pte_addr(*pte));
+                parent = phys_to_virt(dma_pte_addr(pte));
                 total--;
         }
         return NULL;
@@ -398,8 +669,8 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
         pte = dma_addr_level_pte(domain, addr, 1);
 
         if (pte) {
-                dma_clear_pte(*pte);
-                __iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+                dma_clear_pte(pte);
+                domain_flush_cache(domain, pte, sizeof(*pte));
         }
 }
 
@@ -445,10 +716,9 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
                         pte = dma_addr_level_pte(domain, tmp, level);
                         if (pte) {
                                 free_pgtable_page(
-                                        phys_to_virt(dma_pte_addr(*pte)));
-                                dma_clear_pte(*pte);
-                                __iommu_flush_cache(domain->iommu,
-                                                pte, sizeof(*pte));
+                                        phys_to_virt(dma_pte_addr(pte)));
+                                dma_clear_pte(pte);
+                                domain_flush_cache(domain, pte, sizeof(*pte));
                         }
                         tmp += level_size(level);
                 }
@@ -950,17 +1220,28 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 
 
 static void domain_exit(struct dmar_domain *domain);
+static void vm_domain_exit(struct dmar_domain *domain);
 
 void free_dmar_iommu(struct intel_iommu *iommu)
 {
         struct dmar_domain *domain;
         int i;
+        unsigned long flags;
 
         i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
         for (; i < cap_ndoms(iommu->cap); ) {
                 domain = iommu->domains[i];
                 clear_bit(i, iommu->domain_ids);
-                domain_exit(domain);
+
+                spin_lock_irqsave(&domain->iommu_lock, flags);
+                if (--domain->iommu_count == 0) {
+                        if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
+                                vm_domain_exit(domain);
+                        else
+                                domain_exit(domain);
+                }
+                spin_unlock_irqrestore(&domain->iommu_lock, flags);
+
                 i = find_next_bit(iommu->domain_ids,
                         cap_ndoms(iommu->cap), i+1);
         }
@@ -978,6 +1259,17 @@ void free_dmar_iommu(struct intel_iommu *iommu)
         kfree(iommu->domains);
         kfree(iommu->domain_ids);
 
+        g_iommus[iommu->seq_id] = NULL;
+
+        /* if all iommus are freed, free g_iommus */
+        for (i = 0; i < g_num_of_iommus; i++) {
+                if (g_iommus[i])
+                        break;
+        }
+
+        if (i == g_num_of_iommus)
+                kfree(g_iommus);
+
         /* free context mapping */
         free_context_table(iommu);
 }
@@ -1006,7 +1298,9 @@ static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
 
         set_bit(num, iommu->domain_ids);
         domain->id = num;
-        domain->iommu = iommu;
+        memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+        set_bit(iommu->seq_id, &domain->iommu_bmp);
+        domain->flags = 0;
         iommu->domains[num] = domain;
         spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -1016,10 +1310,13 @@ static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
 static void iommu_free_domain(struct dmar_domain *domain)
 {
         unsigned long flags;
+        struct intel_iommu *iommu;
+
+        iommu = domain_get_iommu(domain);
 
-        spin_lock_irqsave(&domain->iommu->lock, flags);
-        clear_bit(domain->id, domain->iommu->domain_ids);
-        spin_unlock_irqrestore(&domain->iommu->lock, flags);
+        spin_lock_irqsave(&iommu->lock, flags);
+        clear_bit(domain->id, iommu->domain_ids);
+        spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static struct iova_domain reserved_iova_list;
@@ -1094,11 +1391,12 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 
         init_iova_domain(&domain->iovad, DMA_32BIT_PFN);
         spin_lock_init(&domain->mapping_lock);
+        spin_lock_init(&domain->iommu_lock);
 
         domain_reserve_special_ranges(domain);
 
         /* calculate AGAW */
-        iommu = domain->iommu;
+        iommu = domain_get_iommu(domain);
         if (guest_width > cap_mgaw(iommu->cap))
                 guest_width = cap_mgaw(iommu->cap);
         domain->gaw = guest_width;
@@ -1115,6 +1413,13 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
         domain->agaw = agaw;
         INIT_LIST_HEAD(&domain->devices);
 
+        if (ecap_coherent(iommu->ecap))
+                domain->iommu_coherency = 1;
+        else
+                domain->iommu_coherency = 0;
+
+        domain->iommu_count = 1;
+
         /* always allocate the top pgd */
         domain->pgd = (struct dma_pte *)alloc_pgtable_page();
         if (!domain->pgd)
@@ -1151,28 +1456,82 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
                 u8 bus, u8 devfn)
 {
         struct context_entry *context;
-        struct intel_iommu *iommu = domain->iommu;
         unsigned long flags;
+        struct intel_iommu *iommu;
+        struct dma_pte *pgd;
+        unsigned long num;
+        unsigned long ndomains;
+        int id;
+        int agaw;
 
         pr_debug("Set context mapping for %02x:%02x.%d\n",
                 bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
         BUG_ON(!domain->pgd);
+
+        iommu = device_to_iommu(bus, devfn);
+        if (!iommu)
+                return -ENODEV;
+
         context = device_to_context_entry(iommu, bus, devfn);
         if (!context)
                 return -ENOMEM;
         spin_lock_irqsave(&iommu->lock, flags);
-        if (context_present(*context)) {
+        if (context_present(context)) {
                 spin_unlock_irqrestore(&iommu->lock, flags);
                 return 0;
         }
 
-        context_set_domain_id(*context, domain->id);
-        context_set_address_width(*context, domain->agaw);
-        context_set_address_root(*context, virt_to_phys(domain->pgd));
-        context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
-        context_set_fault_enable(*context);
-        context_set_present(*context);
-        __iommu_flush_cache(iommu, context, sizeof(*context));
+        id = domain->id;
+        pgd = domain->pgd;
+
+        if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) {
+                int found = 0;
+
+                /* find an available domain id for this device in iommu */
+                ndomains = cap_ndoms(iommu->cap);
+                num = find_first_bit(iommu->domain_ids, ndomains);
+                for (; num < ndomains; ) {
+                        if (iommu->domains[num] == domain) {
+                                id = num;
+                                found = 1;
+                                break;
+                        }
+                        num = find_next_bit(iommu->domain_ids,
+                                            cap_ndoms(iommu->cap), num+1);
+                }
+
+                if (found == 0) {
+                        num = find_first_zero_bit(iommu->domain_ids, ndomains);
+                        if (num >= ndomains) {
+                                spin_unlock_irqrestore(&iommu->lock, flags);
+                                printk(KERN_ERR "IOMMU: no free domain ids\n");
+                                return -EFAULT;
+                        }
+
+                        set_bit(num, iommu->domain_ids);
+                        iommu->domains[num] = domain;
+                        id = num;
+                }
+
+                /* Skip top levels of page tables for an iommu whose
+                 * agaw is smaller than the default.
+                 */
+                for (agaw = domain->agaw; agaw != iommu->agaw; agaw--) {
+                        pgd = phys_to_virt(dma_pte_addr(pgd));
+                        if (!dma_pte_present(pgd)) {
+                                spin_unlock_irqrestore(&iommu->lock, flags);
+                                return -ENOMEM;
+                        }
+                }
+        }
+
+        context_set_domain_id(context, id);
+        context_set_address_width(context, iommu->agaw);
+        context_set_address_root(context, virt_to_phys(pgd));
+        context_set_translation_type(context, CONTEXT_TT_MULTI_LEVEL);
+        context_set_fault_enable(context);
+        context_set_present(context);
+        domain_flush_cache(domain, context, sizeof(*context));
 
         /* it's a non-present to present mapping */
         if (iommu->flush.flush_context(iommu, domain->id,
@@ -1183,6 +1542,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
         iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
 
         spin_unlock_irqrestore(&iommu->lock, flags);
+
+        spin_lock_irqsave(&domain->iommu_lock, flags);
+        if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
+                domain->iommu_count++;
+                domain_update_iommu_coherency(domain);
+        }
+        spin_unlock_irqrestore(&domain->iommu_lock, flags);
         return 0;
 }
 
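
The interesting new case above is a DOMAIN_FLAG_VIRTUAL_MACHINE domain meeting an IOMMU whose agaw is smaller than the domain's: the code descends the shared page table and programs the context entry with a subtree root instead of domain->pgd. A toy userspace model of that descent, with table entries as bare u64 values carrying the R/W present bits as in struct dma_pte:

/* Toy model of the "skip top page-table levels" loop; all data here is fabricated. */
#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_MASK (~0xfffULL)

/* toy 'physical memory': entries[i] is the first entry of table i */
static uint64_t entries[3] = {
        0x1000 | 3,     /* table 0, the agaw-2 root: points at table 1, R/W */
        0x2000 | 3,     /* table 1, the agaw-1 root: points at table 2, R/W */
        0,
};

static uint64_t *phys_to_virt_sketch(uint64_t phys)
{
        return &entries[phys >> 12];    /* 0x0000 -> 0, 0x1000 -> 1, ... */
}

int main(void)
{
        uint64_t *pgd = &entries[0];    /* domain->pgd, agaw 2 */
        int domain_agaw = 2, iommu_agaw = 1, agaw;

        for (agaw = domain_agaw; agaw != iommu_agaw; agaw--) {
                pgd = phys_to_virt_sketch(*pgd & VTD_PAGE_MASK);
                if (!(*pgd & 3)) {      /* dma_pte_present() analogue */
                        puts("level not populated");
                        return 1;
                }
        }
        printf("context gets table %td as its root\n", pgd - entries);
        return 0;
}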
@@ -1218,13 +1584,17 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev)
                                              tmp->bus->number, tmp->devfn);
 }
 
-static int domain_context_mapped(struct dmar_domain *domain,
-        struct pci_dev *pdev)
+static int domain_context_mapped(struct pci_dev *pdev)
 {
         int ret;
         struct pci_dev *tmp, *parent;
+        struct intel_iommu *iommu;
+
+        iommu = device_to_iommu(pdev->bus->number, pdev->devfn);
+        if (!iommu)
+                return -ENODEV;
 
-        ret = device_context_mapped(domain->iommu,
+        ret = device_context_mapped(iommu,
                 pdev->bus->number, pdev->devfn);
         if (!ret)
                 return ret;
@@ -1235,17 +1605,17 @@ static int domain_context_mapped(struct dmar_domain *domain,
         /* Secondary interface's bus number and devfn 0 */
         parent = pdev->bus->self;
         while (parent != tmp) {
-                ret = device_context_mapped(domain->iommu, parent->bus->number,
+                ret = device_context_mapped(iommu, parent->bus->number,
                         parent->devfn);
                 if (!ret)
                         return ret;
                 parent = parent->bus->self;
         }
         if (tmp->is_pcie)
-                return device_context_mapped(domain->iommu,
+                return device_context_mapped(iommu,
                         tmp->subordinate->number, 0);
         else
-                return device_context_mapped(domain->iommu,
+                return device_context_mapped(iommu,
                         tmp->bus->number, tmp->devfn);
 }
 
1251 | 1621 | ||
@@ -1273,22 +1643,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, | |||
1273 | /* We don't need lock here, nobody else | 1643 | /* We don't need lock here, nobody else |
1274 | * touches the iova range | 1644 | * touches the iova range |
1275 | */ | 1645 | */ |
1276 | BUG_ON(dma_pte_addr(*pte)); | 1646 | BUG_ON(dma_pte_addr(pte)); |
1277 | dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT); | 1647 | dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT); |
1278 | dma_set_pte_prot(*pte, prot); | 1648 | dma_set_pte_prot(pte, prot); |
1279 | __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); | 1649 | domain_flush_cache(domain, pte, sizeof(*pte)); |
1280 | start_pfn++; | 1650 | start_pfn++; |
1281 | index++; | 1651 | index++; |
1282 | } | 1652 | } |
1283 | return 0; | 1653 | return 0; |
1284 | } | 1654 | } |
1285 | 1655 | ||
1286 | static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn) | 1656 | static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn) |
1287 | { | 1657 | { |
1288 | clear_context_table(domain->iommu, bus, devfn); | 1658 | if (!iommu) |
1289 | domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0, | 1659 | return; |
1660 | |||
1661 | clear_context_table(iommu, bus, devfn); | ||
1662 | iommu->flush.flush_context(iommu, 0, 0, 0, | ||
1290 | DMA_CCMD_GLOBAL_INVL, 0); | 1663 | DMA_CCMD_GLOBAL_INVL, 0); |
1291 | domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0, | 1664 | iommu->flush.flush_iotlb(iommu, 0, 0, 0, |
1292 | DMA_TLB_GLOBAL_FLUSH, 0); | 1665 | DMA_TLB_GLOBAL_FLUSH, 0); |
1293 | } | 1666 | } |
1294 | 1667 | ||
@@ -1296,6 +1669,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain) | |||
1296 | { | 1669 | { |
1297 | struct device_domain_info *info; | 1670 | struct device_domain_info *info; |
1298 | unsigned long flags; | 1671 | unsigned long flags; |
1672 | struct intel_iommu *iommu; | ||
1299 | 1673 | ||
1300 | spin_lock_irqsave(&device_domain_lock, flags); | 1674 | spin_lock_irqsave(&device_domain_lock, flags); |
1301 | while (!list_empty(&domain->devices)) { | 1675 | while (!list_empty(&domain->devices)) { |
@@ -1307,7 +1681,8 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
                         info->dev->dev.archdata.iommu = NULL;
                 spin_unlock_irqrestore(&device_domain_lock, flags);
 
-                detach_domain_for_dev(info->domain, info->bus, info->devfn);
+                iommu = device_to_iommu(info->bus, info->devfn);
+                iommu_detach_dev(iommu, info->bus, info->devfn);
                 free_devinfo_mem(info);
 
                 spin_lock_irqsave(&device_domain_lock, flags);
@@ -1400,7 +1775,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
                 info->dev = NULL;
                 info->domain = domain;
                 /* This domain is shared by devices under p2p bridge */
-                domain->flags |= DOMAIN_FLAG_MULTIPLE_DEVICES;
+                domain->flags |= DOMAIN_FLAG_P2P_MULTIPLE_DEVICES;
 
                 /* pcie-to-pci bridge already has a domain, uses it */
                 found = NULL;
@@ -1563,6 +1938,11 @@ static void __init iommu_prepare_gfx_mapping(void)
                         printk(KERN_ERR "IOMMU: mapping reserved region failed\n");
         }
 }
+#else /* !CONFIG_DMAR_GFX_WA */
+static inline void iommu_prepare_gfx_mapping(void)
+{
+        return;
+}
 #endif
 
 #ifdef CONFIG_DMAR_FLOPPY_WA
@@ -1590,7 +1970,7 @@ static inline void iommu_prepare_isa(void)
 }
 #endif /* !CONFIG_DMAR_FLPY_WA */
 
-int __init init_dmars(void)
+static int __init init_dmars(void)
 {
         struct dmar_drhd_unit *drhd;
         struct dmar_rmrr_unit *rmrr;
@@ -1613,9 +1993,18 @@ int __init init_dmars(void)
          */
         }
 
+        g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
+                        GFP_KERNEL);
+        if (!g_iommus) {
+                printk(KERN_ERR "Allocating global iommu array failed\n");
+                ret = -ENOMEM;
+                goto error;
+        }
+
         deferred_flush = kzalloc(g_num_of_iommus *
                 sizeof(struct deferred_flush_tables), GFP_KERNEL);
         if (!deferred_flush) {
+                kfree(g_iommus);
                 ret = -ENOMEM;
                 goto error;
         }
@@ -1625,6 +2014,7 @@ int __init init_dmars(void)
                         continue;
 
                 iommu = drhd->iommu;
+                g_iommus[iommu->seq_id] = iommu;
 
                 ret = iommu_init_domains(iommu);
                 if (ret)
@@ -1737,6 +2127,7 @@ error:
                 iommu = drhd->iommu;
                 free_iommu(iommu);
         }
+        kfree(g_iommus);
         return ret;
 }
 
@@ -1805,7 +2196,7 @@ get_valid_domain_for_dev(pdev)
         }
 
         /* make sure context mapping is ok */
-        if (unlikely(!domain_context_mapped(domain, pdev))) {
+        if (unlikely(!domain_context_mapped(pdev))) {
                 ret = domain_context_mapping(domain, pdev);
                 if (ret) {
                         printk(KERN_ERR
@@ -1827,6 +2218,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
         struct iova *iova;
         int prot = 0;
         int ret;
+        struct intel_iommu *iommu;
 
         BUG_ON(dir == DMA_NONE);
         if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
@@ -1836,6 +2228,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
         if (!domain)
                 return 0;
 
+        iommu = domain_get_iommu(domain);
         size = aligned_size((u64)paddr, size);
 
         iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
@@ -1849,7 +2242,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
          * mappings..
          */
         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
-                        !cap_zlr(domain->iommu->cap))
+                        !cap_zlr(iommu->cap))
                 prot |= DMA_PTE_READ;
         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                 prot |= DMA_PTE_WRITE;
@@ -1865,10 +2258,10 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
                 goto error;
 
         /* it's a non-present to present mapping */
-        ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
+        ret = iommu_flush_iotlb_psi(iommu, domain->id,
                         start_paddr, size >> VTD_PAGE_SHIFT, 1);
         if (ret)
-                iommu_flush_write_buffer(domain->iommu);
+                iommu_flush_write_buffer(iommu);
 
         return start_paddr + ((u64)paddr & (~PAGE_MASK));
 
@@ -1895,10 +2288,11 @@ static void flush_unmaps(void)
 
         /* just flush them all */
         for (i = 0; i < g_num_of_iommus; i++) {
-                if (deferred_flush[i].next) {
-                        struct intel_iommu *iommu =
-                                deferred_flush[i].domain[0]->iommu;
+                struct intel_iommu *iommu = g_iommus[i];
+                if (!iommu)
+                        continue;
 
+                if (deferred_flush[i].next) {
                         iommu->flush.flush_iotlb(iommu, 0, 0, 0,
                                 DMA_TLB_GLOBAL_FLUSH, 0);
                         for (j = 0; j < deferred_flush[i].next; j++) {
@@ -1925,12 +2319,14 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
 {
         unsigned long flags;
         int next, iommu_id;
+        struct intel_iommu *iommu;
 
         spin_lock_irqsave(&async_umap_flush_lock, flags);
         if (list_size == HIGH_WATER_MARK)
                 flush_unmaps();
 
-        iommu_id = dom->iommu->seq_id;
+        iommu = domain_get_iommu(dom);
+        iommu_id = iommu->seq_id;
 
         next = deferred_flush[iommu_id].next;
         deferred_flush[iommu_id].domain[next] = dom;
@@ -1952,12 +2348,15 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
         struct dmar_domain *domain;
         unsigned long start_addr;
         struct iova *iova;
+        struct intel_iommu *iommu;
 
         if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
                 return;
         domain = find_domain(pdev);
         BUG_ON(!domain);
 
+        iommu = domain_get_iommu(domain);
+
         iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
         if (!iova)
                 return;
@@ -1973,9 +2372,9 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
         /* free page tables */
         dma_pte_free_pagetable(domain, start_addr, start_addr + size);
         if (intel_iommu_strict) {
-                if (iommu_flush_iotlb_psi(domain->iommu,
+                if (iommu_flush_iotlb_psi(iommu,
                         domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
-                        iommu_flush_write_buffer(domain->iommu);
+                        iommu_flush_write_buffer(iommu);
                 /* free iova */
                 __free_iova(&domain->iovad, iova);
         } else {
@@ -2036,11 +2435,15 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
         size_t size = 0;
         void *addr;
         struct scatterlist *sg;
+        struct intel_iommu *iommu;
 
         if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
                 return;
 
         domain = find_domain(pdev);
+        BUG_ON(!domain);
+
+        iommu = domain_get_iommu(domain);
 
         iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
         if (!iova)
@@ -2057,9 +2460,9 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
         /* free page tables */
         dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 
-        if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
+        if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
                         size >> VTD_PAGE_SHIFT, 0))
-                iommu_flush_write_buffer(domain->iommu);
+                iommu_flush_write_buffer(iommu);
 
         /* free iova */
         __free_iova(&domain->iovad, iova);
@@ -2093,6 +2496,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
         int ret;
         struct scatterlist *sg;
         unsigned long start_addr;
+        struct intel_iommu *iommu;
 
         BUG_ON(dir == DMA_NONE);
         if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
@@ -2102,6 +2506,8 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
         if (!domain)
                 return 0;
 
+        iommu = domain_get_iommu(domain);
+
         for_each_sg(sglist, sg, nelems, i) {
                 addr = SG_ENT_VIRT_ADDRESS(sg);
                 addr = (void *)virt_to_phys(addr);
@@ -2119,7 +2525,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
          * mappings..
          */
         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
-                        !cap_zlr(domain->iommu->cap))
+                        !cap_zlr(iommu->cap))
                 prot |= DMA_PTE_READ;
         if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                 prot |= DMA_PTE_WRITE;
@@ -2151,9 +2557,9 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
         }
 
         /* it's a non-present to present mapping */
-        if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
+        if (iommu_flush_iotlb_psi(iommu, domain->id,
                         start_addr, offset >> VTD_PAGE_SHIFT, 1))
-                iommu_flush_write_buffer(domain->iommu);
+                iommu_flush_write_buffer(iommu);
         return nelems;
 }
 
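
Both __intel_map_single() and intel_map_sg() now compute prot against the per-domain IOMMU's capabilities. The rule is worth stating in isolation: writes follow the DMA direction, but reads must stay enabled whenever the hardware cannot do zero-length reads on write-only mappings. A standalone version, with a plain bool standing in for cap_zlr(iommu->cap):

/* Standalone restatement of the prot computation; enum values are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define DMA_PTE_READ  1
#define DMA_PTE_WRITE 2

enum dma_dir { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

static int dma_dir_to_prot(enum dma_dir dir, bool zlr)
{
        int prot = 0;

        /* without ZLR, even device-write-only mappings must stay readable */
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || !zlr)
                prot |= DMA_PTE_READ;
        if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
                prot |= DMA_PTE_WRITE;
        return prot;
}

int main(void)
{
        printf("FROM_DEVICE, zlr=1 -> prot %d\n",
               dma_dir_to_prot(DMA_FROM_DEVICE, true));   /* write only */
        printf("FROM_DEVICE, zlr=0 -> prot %d\n",
               dma_dir_to_prot(DMA_FROM_DEVICE, false));  /* read + write */
        return 0;
}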
@@ -2325,10 +2731,220 @@ int __init intel_iommu_init(void) | |||
2325 | init_timer(&unmap_timer); | 2731 | init_timer(&unmap_timer); |
2326 | force_iommu = 1; | 2732 | force_iommu = 1; |
2327 | dma_ops = &intel_dma_ops; | 2733 | dma_ops = &intel_dma_ops; |
2734 | |||
2735 | register_iommu(&intel_iommu_ops); | ||
2736 | |||
2737 | return 0; | ||
2738 | } | ||
2739 | |||
2740 | static int vm_domain_add_dev_info(struct dmar_domain *domain, | ||
2741 | struct pci_dev *pdev) | ||
2742 | { | ||
2743 | struct device_domain_info *info; | ||
2744 | unsigned long flags; | ||
2745 | |||
2746 | info = alloc_devinfo_mem(); | ||
2747 | if (!info) | ||
2748 | return -ENOMEM; | ||
2749 | |||
2750 | info->bus = pdev->bus->number; | ||
2751 | info->devfn = pdev->devfn; | ||
2752 | info->dev = pdev; | ||
2753 | info->domain = domain; | ||
2754 | |||
2755 | spin_lock_irqsave(&device_domain_lock, flags); | ||
2756 | list_add(&info->link, &domain->devices); | ||
2757 | list_add(&info->global, &device_domain_list); | ||
2758 | pdev->dev.archdata.iommu = info; | ||
2759 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
2760 | |||
2761 | return 0; | ||
2762 | } | ||
2763 | |||
2764 | static void vm_domain_remove_one_dev_info(struct dmar_domain *domain, | ||
2765 | struct pci_dev *pdev) | ||
2766 | { | ||
2767 | struct device_domain_info *info; | ||
2768 | struct intel_iommu *iommu; | ||
2769 | unsigned long flags; | ||
2770 | int found = 0; | ||
2771 | struct list_head *entry, *tmp; | ||
2772 | |||
2773 | iommu = device_to_iommu(pdev->bus->number, pdev->devfn); | ||
2774 | if (!iommu) | ||
2775 | return; | ||
2776 | |||
2777 | spin_lock_irqsave(&device_domain_lock, flags); | ||
2778 | list_for_each_safe(entry, tmp, &domain->devices) { | ||
2779 | info = list_entry(entry, struct device_domain_info, link); | ||
2780 | if (info->bus == pdev->bus->number && | ||
2781 | info->devfn == pdev->devfn) { | ||
2782 | list_del(&info->link); | ||
2783 | list_del(&info->global); | ||
2784 | if (info->dev) | ||
2785 | info->dev->dev.archdata.iommu = NULL; | ||
2786 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
2787 | |||
2788 | iommu_detach_dev(iommu, info->bus, info->devfn); | ||
2789 | free_devinfo_mem(info); | ||
2790 | |||
2791 | spin_lock_irqsave(&device_domain_lock, flags); | ||
2792 | |||
2793 | if (found) | ||
2794 | break; | ||
2795 | else | ||
2796 | continue; | ||
2797 | } | ||
2798 | |||
2799 | /* if there is no other devices under the same iommu | ||
2800 | * owned by this domain, clear this iommu in iommu_bmp | ||
2801 | * update iommu count and coherency | ||
2802 | */ | ||
2803 | if (device_to_iommu(info->bus, info->devfn) == iommu) | ||
2804 | found = 1; | ||
2805 | } | ||
2806 | |||
2807 | if (found == 0) { | ||
2808 | unsigned long tmp_flags; | ||
2809 | spin_lock_irqsave(&domain->iommu_lock, tmp_flags); | ||
2810 | clear_bit(iommu->seq_id, &domain->iommu_bmp); | ||
2811 | domain->iommu_count--; | ||
2812 | domain_update_iommu_coherency(domain); | ||
2813 | spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags); | ||
2814 | } | ||
2815 | |||
2816 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
2817 | } | ||
2818 | |||
2819 | static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) | ||
2820 | { | ||
2821 | struct device_domain_info *info; | ||
2822 | struct intel_iommu *iommu; | ||
2823 | unsigned long flags1, flags2; | ||
2824 | |||
2825 | spin_lock_irqsave(&device_domain_lock, flags1); | ||
2826 | while (!list_empty(&domain->devices)) { | ||
2827 | info = list_entry(domain->devices.next, | ||
2828 | struct device_domain_info, link); | ||
2829 | list_del(&info->link); | ||
2830 | list_del(&info->global); | ||
2831 | if (info->dev) | ||
2832 | info->dev->dev.archdata.iommu = NULL; | ||
2833 | |||
2834 | spin_unlock_irqrestore(&device_domain_lock, flags1); | ||
2835 | |||
2836 | iommu = device_to_iommu(info->bus, info->devfn); | ||
2837 | iommu_detach_dev(iommu, info->bus, info->devfn); | ||
2838 | |||
2839 | /* clear this iommu in iommu_bmp, update iommu count | ||
2840 | * and coherency | ||
2841 | */ | ||
2842 | spin_lock_irqsave(&domain->iommu_lock, flags2); | ||
2843 | if (test_and_clear_bit(iommu->seq_id, | ||
2844 | &domain->iommu_bmp)) { | ||
2845 | domain->iommu_count--; | ||
2846 | domain_update_iommu_coherency(domain); | ||
2847 | } | ||
2848 | spin_unlock_irqrestore(&domain->iommu_lock, flags2); | ||
2849 | |||
2850 | free_devinfo_mem(info); | ||
2851 | spin_lock_irqsave(&device_domain_lock, flags1); | ||
2852 | } | ||
2853 | spin_unlock_irqrestore(&device_domain_lock, flags1); | ||
2854 | } | ||
2855 | |||
2856 | /* domain id for virtual machine; it is never programmed into a context entry */ | ||
2857 | static unsigned long vm_domid; | ||
2858 | |||
2859 | static int vm_domain_min_agaw(struct dmar_domain *domain) | ||
2860 | { | ||
2861 | int i; | ||
2862 | int min_agaw = domain->agaw; | ||
2863 | |||
2864 | i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); | ||
2865 | for (; i < g_num_of_iommus; ) { | ||
2866 | if (min_agaw > g_iommus[i]->agaw) | ||
2867 | min_agaw = g_iommus[i]->agaw; | ||
2868 | |||
2869 | i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1); | ||
2870 | } | ||
2871 | |||
2872 | return min_agaw; | ||
2873 | } | ||
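vm_domain_min_agaw() exists because each unit in the domain's bitmap supports its own maximum page-table depth, and a multi-IOMMU domain can only address what the shallowest unit reaches. The open-coded find_first_bit()/find_next_bit() walk was the idiom of the day; as a sketch, later kernels would spell the same loop with the for_each_set_bit() helper from <linux/bitops.h>:

	/* equivalent walk, assuming the later for_each_set_bit() helper */
	unsigned int i;
	int min_agaw = domain->agaw;

	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus)
		min_agaw = min(min_agaw, g_iommus[i]->agaw);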
2874 | |||
2875 | static struct dmar_domain *iommu_alloc_vm_domain(void) | ||
2876 | { | ||
2877 | struct dmar_domain *domain; | ||
2878 | |||
2879 | domain = alloc_domain_mem(); | ||
2880 | if (!domain) | ||
2881 | return NULL; | ||
2882 | |||
2883 | domain->id = vm_domid++; | ||
2884 | memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); | ||
2885 | domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE; | ||
2886 | |||
2887 | return domain; | ||
2888 | } | ||
2889 | |||
2890 | static int vm_domain_init(struct dmar_domain *domain, int guest_width) | ||
2891 | { | ||
2892 | int adjust_width; | ||
2893 | |||
2894 | init_iova_domain(&domain->iovad, DMA_32BIT_PFN); | ||
2895 | spin_lock_init(&domain->mapping_lock); | ||
2896 | spin_lock_init(&domain->iommu_lock); | ||
2897 | |||
2898 | domain_reserve_special_ranges(domain); | ||
2899 | |||
2900 | /* calculate AGAW */ | ||
2901 | domain->gaw = guest_width; | ||
2902 | adjust_width = guestwidth_to_adjustwidth(guest_width); | ||
2903 | domain->agaw = width_to_agaw(adjust_width); | ||
2904 | |||
2905 | INIT_LIST_HEAD(&domain->devices); | ||
2906 | |||
2907 | domain->iommu_count = 0; | ||
2908 | domain->iommu_coherency = 0; | ||
2909 | domain->max_addr = 0; | ||
2910 | |||
2911 | /* always allocate the top pgd */ | ||
2912 | domain->pgd = (struct dma_pte *)alloc_pgtable_page(); | ||
2913 | if (!domain->pgd) | ||
2914 | return -ENOMEM; | ||
2915 | domain_flush_cache(domain, domain->pgd, PAGE_SIZE); | ||
2328 | return 0; | 2916 | return 0; |
2329 | } | 2917 | } |
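The width arithmetic in vm_domain_init() is worth a worked example. Assuming the usual VT-d encoding, where page tables translate in 9-bit strides above a 12-bit page offset and width = 30 + 9 * agaw (the helper bodies are not in this hunk):

	/* guest_width 48: (48 - 12) % 9 == 0, so adjust_width stays 48
	 * and width_to_agaw(48) yields agaw 2, a 4-level table.
	 * guest_width 40 is not on a stride boundary and is rounded up
	 * to 48, landing on the same agaw.
	 */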
2330 | 2918 | ||
2331 | void intel_iommu_domain_exit(struct dmar_domain *domain) | 2919 | static void iommu_free_vm_domain(struct dmar_domain *domain) |
2920 | { | ||
2921 | unsigned long flags; | ||
2922 | struct dmar_drhd_unit *drhd; | ||
2923 | struct intel_iommu *iommu; | ||
2924 | unsigned long i; | ||
2925 | unsigned long ndomains; | ||
2926 | |||
2927 | for_each_drhd_unit(drhd) { | ||
2928 | if (drhd->ignored) | ||
2929 | continue; | ||
2930 | iommu = drhd->iommu; | ||
2931 | |||
2932 | ndomains = cap_ndoms(iommu->cap); | ||
2933 | i = find_first_bit(iommu->domain_ids, ndomains); | ||
2934 | for (; i < ndomains; ) { | ||
2935 | if (iommu->domains[i] == domain) { | ||
2936 | spin_lock_irqsave(&iommu->lock, flags); | ||
2937 | clear_bit(i, iommu->domain_ids); | ||
2938 | iommu->domains[i] = NULL; | ||
2939 | spin_unlock_irqrestore(&iommu->lock, flags); | ||
2940 | break; | ||
2941 | } | ||
2942 | i = find_next_bit(iommu->domain_ids, ndomains, i+1); | ||
2943 | } | ||
2944 | } | ||
2945 | } | ||
2946 | |||
2947 | static void vm_domain_exit(struct dmar_domain *domain) | ||
2332 | { | 2948 | { |
2333 | u64 end; | 2949 | u64 end; |
2334 | 2950 | ||
@@ -2336,6 +2952,9 @@ void intel_iommu_domain_exit(struct dmar_domain *domain) | |||
2336 | if (!domain) | 2952 | if (!domain) |
2337 | return; | 2953 | return; |
2338 | 2954 | ||
2955 | vm_domain_remove_all_dev_info(domain); | ||
2956 | /* destroy iovas */ | ||
2957 | put_iova_domain(&domain->iovad); | ||
2339 | end = DOMAIN_MAX_ADDR(domain->gaw); | 2958 | end = DOMAIN_MAX_ADDR(domain->gaw); |
2340 | end = end & (~VTD_PAGE_MASK); | 2959 | end = end & (~VTD_PAGE_MASK); |
2341 | 2960 | ||
@@ -2345,94 +2964,167 @@ void intel_iommu_domain_exit(struct dmar_domain *domain) | |||
2345 | /* free page tables */ | 2964 | /* free page tables */ |
2346 | dma_pte_free_pagetable(domain, 0, end); | 2965 | dma_pte_free_pagetable(domain, 0, end); |
2347 | 2966 | ||
2348 | iommu_free_domain(domain); | 2967 | iommu_free_vm_domain(domain); |
2349 | free_domain_mem(domain); | 2968 | free_domain_mem(domain); |
2350 | } | 2969 | } |
2351 | EXPORT_SYMBOL_GPL(intel_iommu_domain_exit); | ||
2352 | 2970 | ||
2353 | struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev) | 2971 | static int intel_iommu_domain_init(struct iommu_domain *domain) |
2354 | { | 2972 | { |
2355 | struct dmar_drhd_unit *drhd; | 2973 | struct dmar_domain *dmar_domain; |
2356 | struct dmar_domain *domain; | ||
2357 | struct intel_iommu *iommu; | ||
2358 | |||
2359 | drhd = dmar_find_matched_drhd_unit(pdev); | ||
2360 | if (!drhd) { | ||
2361 | printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n"); | ||
2362 | return NULL; | ||
2363 | } | ||
2364 | 2974 | ||
2365 | iommu = drhd->iommu; | 2975 | dmar_domain = iommu_alloc_vm_domain(); |
2366 | if (!iommu) { | 2976 | if (!dmar_domain) { |
2367 | printk(KERN_ERR | ||
2368 | "intel_iommu_domain_alloc: iommu == NULL\n"); | ||
2369 | return NULL; | ||
2370 | } | ||
2371 | domain = iommu_alloc_domain(iommu); | ||
2372 | if (!domain) { | ||
2373 | printk(KERN_ERR | 2977 | printk(KERN_ERR |
2374 | "intel_iommu_domain_alloc: domain == NULL\n"); | 2978 | "intel_iommu_domain_init: dmar_domain == NULL\n"); |
2375 | return NULL; | 2979 | return -ENOMEM; |
2376 | } | 2980 | } |
2377 | if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { | 2981 | if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { |
2378 | printk(KERN_ERR | 2982 | printk(KERN_ERR |
2379 | "intel_iommu_domain_alloc: domain_init() failed\n"); | 2983 | "intel_iommu_domain_init() failed\n"); |
2380 | intel_iommu_domain_exit(domain); | 2984 | vm_domain_exit(dmar_domain); |
2381 | return NULL; | 2985 | return -ENOMEM; |
2382 | } | 2986 | } |
2383 | return domain; | 2987 | domain->priv = dmar_domain; |
2988 | |||
2989 | return 0; | ||
2384 | } | 2990 | } |
2385 | EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc); | ||
2386 | 2991 | ||
2387 | int intel_iommu_context_mapping( | 2992 | static void intel_iommu_domain_destroy(struct iommu_domain *domain) |
2388 | struct dmar_domain *domain, struct pci_dev *pdev) | ||
2389 | { | 2993 | { |
2390 | int rc; | 2994 | struct dmar_domain *dmar_domain = domain->priv; |
2391 | rc = domain_context_mapping(domain, pdev); | 2995 | |
2392 | return rc; | 2996 | domain->priv = NULL; |
2997 | vm_domain_exit(dmar_domain); | ||
2393 | } | 2998 | } |
2394 | EXPORT_SYMBOL_GPL(intel_iommu_context_mapping); | ||
2395 | 2999 | ||
2396 | int intel_iommu_page_mapping( | 3000 | static int intel_iommu_attach_device(struct iommu_domain *domain, |
2397 | struct dmar_domain *domain, dma_addr_t iova, | 3001 | struct device *dev) |
2398 | u64 hpa, size_t size, int prot) | ||
2399 | { | 3002 | { |
2400 | int rc; | 3003 | struct dmar_domain *dmar_domain = domain->priv; |
2401 | rc = domain_page_mapping(domain, iova, hpa, size, prot); | 3004 | struct pci_dev *pdev = to_pci_dev(dev); |
2402 | return rc; | 3005 | struct intel_iommu *iommu; |
3006 | int addr_width; | ||
3007 | u64 end; | ||
3008 | int ret; | ||
3009 | |||
3010 | /* normally pdev is not already mapped into another domain */ | ||
3011 | if (unlikely(domain_context_mapped(pdev))) { | ||
3012 | struct dmar_domain *old_domain; | ||
3013 | |||
3014 | old_domain = find_domain(pdev); | ||
3015 | if (old_domain) { | ||
3016 | if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) | ||
3017 | vm_domain_remove_one_dev_info(old_domain, pdev); | ||
3018 | else | ||
3019 | domain_remove_dev_info(old_domain); | ||
3020 | } | ||
3021 | } | ||
3022 | |||
3023 | iommu = device_to_iommu(pdev->bus->number, pdev->devfn); | ||
3024 | if (!iommu) | ||
3025 | return -ENODEV; | ||
3026 | |||
3027 | /* check whether this iommu's agaw covers the domain's max mapped address */ | ||
3028 | addr_width = agaw_to_width(iommu->agaw); | ||
3029 | end = DOMAIN_MAX_ADDR(addr_width); | ||
3030 | end = end & VTD_PAGE_MASK; | ||
3031 | if (end < dmar_domain->max_addr) { | ||
3032 | printk(KERN_ERR "%s: iommu agaw (%d) is not " | ||
3033 | "sufficient for the mapped address (%llx)\n", | ||
3034 | __func__, iommu->agaw, dmar_domain->max_addr); | ||
3035 | return -EFAULT; | ||
3036 | } | ||
3037 | |||
3038 | ret = domain_context_mapping(dmar_domain, pdev); | ||
3039 | if (ret) | ||
3040 | return ret; | ||
3041 | |||
3042 | ret = vm_domain_add_dev_info(dmar_domain, pdev); | ||
3043 | return ret; | ||
2403 | } | 3044 | } |
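The agaw check in intel_iommu_attach_device() refuses to attach a device behind a unit whose page tables cannot reach addresses the domain has already mapped. With hypothetical numbers, assuming agaw 1 corresponds to a 39-bit width:

	/* end = DOMAIN_MAX_ADDR(39) & VTD_PAGE_MASK = 0x7ffffff000;
	 * a domain whose max_addr already reached 1TB (0x10000000000)
	 * gets -EFAULT instead of silently losing the high mappings.
	 */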
2404 | EXPORT_SYMBOL_GPL(intel_iommu_page_mapping); | ||
2405 | 3045 | ||
2406 | void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn) | 3046 | static void intel_iommu_detach_device(struct iommu_domain *domain, |
3047 | struct device *dev) | ||
2407 | { | 3048 | { |
2408 | detach_domain_for_dev(domain, bus, devfn); | 3049 | struct dmar_domain *dmar_domain = domain->priv; |
3050 | struct pci_dev *pdev = to_pci_dev(dev); | ||
3051 | |||
3052 | vm_domain_remove_one_dev_info(dmar_domain, pdev); | ||
2409 | } | 3053 | } |
2410 | EXPORT_SYMBOL_GPL(intel_iommu_detach_dev); | ||
2411 | 3054 | ||
2412 | struct dmar_domain * | 3055 | static int intel_iommu_map_range(struct iommu_domain *domain, |
2413 | intel_iommu_find_domain(struct pci_dev *pdev) | 3056 | unsigned long iova, phys_addr_t hpa, |
3057 | size_t size, int iommu_prot) | ||
2414 | { | 3058 | { |
2415 | return find_domain(pdev); | 3059 | struct dmar_domain *dmar_domain = domain->priv; |
3060 | u64 max_addr; | ||
3061 | int addr_width; | ||
3062 | int prot = 0; | ||
3063 | int ret; | ||
3064 | |||
3065 | if (iommu_prot & IOMMU_READ) | ||
3066 | prot |= DMA_PTE_READ; | ||
3067 | if (iommu_prot & IOMMU_WRITE) | ||
3068 | prot |= DMA_PTE_WRITE; | ||
3069 | |||
3070 | max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size); | ||
3071 | if (dmar_domain->max_addr < max_addr) { | ||
3072 | int min_agaw; | ||
3073 | u64 end; | ||
3074 | |||
3075 | /* check whether the minimum agaw is sufficient for the new top address */ | ||
3076 | min_agaw = vm_domain_min_agaw(dmar_domain); | ||
3077 | addr_width = agaw_to_width(min_agaw); | ||
3078 | end = DOMAIN_MAX_ADDR(addr_width); | ||
3079 | end = end & VTD_PAGE_MASK; | ||
3080 | if (end < max_addr) { | ||
3081 | printk(KERN_ERR "%s: iommu agaw (%d) is not " | ||
3082 | "sufficient for the mapped address (%llx)\n", | ||
3083 | __func__, min_agaw, max_addr); | ||
3084 | return -EFAULT; | ||
3085 | } | ||
3086 | dmar_domain->max_addr = max_addr; | ||
3087 | } | ||
3088 | |||
3089 | ret = domain_page_mapping(dmar_domain, iova, hpa, size, prot); | ||
3090 | return ret; | ||
2416 | } | 3091 | } |
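intel_iommu_map_range() translates the generic IOMMU_READ/IOMMU_WRITE bits into DMA_PTE_* bits and maintains max_addr as a page-aligned high-water mark, which is what the agaw checks here and in attach consult. A small worked example:

	/* mapping 8KB at iova 0x1000 with IOMMU_READ | IOMMU_WRITE:
	 *   prot     = DMA_PTE_READ | DMA_PTE_WRITE;
	 *   max_addr = (0x1000 & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(0x2000)
	 *            = 0x3000;
	 */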
2417 | EXPORT_SYMBOL_GPL(intel_iommu_find_domain); | ||
2418 | 3092 | ||
2419 | int intel_iommu_found(void) | 3093 | static void intel_iommu_unmap_range(struct iommu_domain *domain, |
3094 | unsigned long iova, size_t size) | ||
2420 | { | 3095 | { |
2421 | return g_num_of_iommus; | 3096 | struct dmar_domain *dmar_domain = domain->priv; |
3097 | dma_addr_t base; | ||
3098 | |||
3099 | /* The address might not be aligned */ | ||
3100 | base = iova & VTD_PAGE_MASK; | ||
3101 | size = VTD_PAGE_ALIGN(size); | ||
3102 | dma_pte_clear_range(dmar_domain, base, base + size); | ||
3103 | |||
3104 | if (dmar_domain->max_addr == base + size) | ||
3105 | dmar_domain->max_addr = base; | ||
2422 | } | 3106 | } |
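Note that intel_iommu_unmap_range() rounds the base down and the size up independently, which only covers the requested range when callers pass page-aligned iova/size pairs. A worked example of both cases:

	/* iova 0x1800, size 0x1800: base 0x1000, aligned size 0x2000,
	 * clearing 0x1000-0x3000 -- the whole request is covered.
	 * iova 0x1800, size 0x1000 would clear only 0x1000-0x2000 and
	 * leave 0x2000-0x2800 mapped, so unaligned callers beware.
	 */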
2423 | EXPORT_SYMBOL_GPL(intel_iommu_found); | ||
2424 | 3107 | ||
2425 | u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova) | 3108 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, |
3109 | unsigned long iova) | ||
2426 | { | 3110 | { |
3111 | struct dmar_domain *dmar_domain = domain->priv; | ||
2427 | struct dma_pte *pte; | 3112 | struct dma_pte *pte; |
2428 | u64 pfn; | 3113 | u64 phys = 0; |
2429 | |||
2430 | pfn = 0; | ||
2431 | pte = addr_to_dma_pte(domain, iova); | ||
2432 | 3114 | ||
3115 | pte = addr_to_dma_pte(dmar_domain, iova); | ||
2433 | if (pte) | 3116 | if (pte) |
2434 | pfn = dma_pte_addr(*pte); | 3117 | phys = dma_pte_addr(pte); |
2435 | 3118 | ||
2436 | return pfn >> VTD_PAGE_SHIFT; | 3119 | return phys; |
2437 | } | 3120 | } |
2438 | EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn); | 3121 | |
3122 | static struct iommu_ops intel_iommu_ops = { | ||
3123 | .domain_init = intel_iommu_domain_init, | ||
3124 | .domain_destroy = intel_iommu_domain_destroy, | ||
3125 | .attach_dev = intel_iommu_attach_device, | ||
3126 | .detach_dev = intel_iommu_detach_device, | ||
3127 | .map = intel_iommu_map_range, | ||
3128 | .unmap = intel_iommu_unmap_range, | ||
3129 | .iova_to_phys = intel_iommu_iova_to_phys, | ||
3130 | }; | ||
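With intel_iommu_ops registered via the register_iommu() call at the top of this hunk, consumers drive VT-d only through the generic entry points. A consumer-side sketch, with error handling abridged and assuming the generic map wrapper is named iommu_map_range() to match the .map callback above:

	static int example_assign_device(struct pci_dev *pdev, struct page *page)
	{
		struct iommu_domain *dom;
		int ret;

		if (!iommu_found())
			return -ENODEV;

		dom = iommu_domain_alloc();			/* -> .domain_init */
		if (!dom)
			return -ENOMEM;

		ret = iommu_attach_device(dom, &pdev->dev);	/* -> .attach_dev */
		if (ret)
			goto out_free;

		/* map one page at IOVA 0, readable and writable (-> .map) */
		ret = iommu_map_range(dom, 0, page_to_phys(page), PAGE_SIZE,
				      IOMMU_READ | IOMMU_WRITE);
		if (ret)
			goto out_detach;

		return 0;

	out_detach:
		iommu_detach_device(dom, &pdev->dev);		/* -> .detach_dev */
	out_free:
		iommu_domain_free(dom);				/* -> .domain_destroy */
		return ret;
	}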