author     Suresh Siddha <suresh.b.siddha@intel.com>  2008-07-10 14:16:35 -0400
committer  Ingo Molnar <mingo@elte.hu>                2008-07-12 02:44:46 -0400
commit     e61d98d8dad0048619bb138b0ff996422ffae53b (patch)
tree       f31fe1610a082e0e12605db879ff56546ad971e5 /drivers/pci
parent     1ba89386db0a3f39590b90b5dd20d7149ae52de0 (diff)
x64, x2apic/intr-remap: Intel vt-d, IOMMU code reorganization
Code reorganization of the generic Intel vt-d parsing routines and the Linux
iommu routines specific to Intel vt-d:

    drivers/pci/dmar.c        now contains the generic vt-d parsing routines
    drivers/pci/intel-iommu.c contains the iommu routines specific to vt-d

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: akpm@linux-foundation.org
Cc: arjan@linux.intel.com
Cc: andi@firstfloor.org
Cc: ebiederm@xmission.com
Cc: jbarnes@virtuousgeek.org
Cc: steiner@sgi.com
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/dma_remapping.h | 155
-rw-r--r--  drivers/pci/dmar.c          |  90
-rw-r--r--  drivers/pci/intel-iommu.c   |  92
-rw-r--r--  drivers/pci/intel-iommu.h   | 163
4 files changed, 264 insertions(+), 236 deletions(-)
diff --git a/drivers/pci/dma_remapping.h b/drivers/pci/dma_remapping.h
new file mode 100644
index 000000000000..05aac8ef96c7
--- /dev/null
+++ b/drivers/pci/dma_remapping.h
@@ -0,0 +1,155 @@
+#ifndef _DMA_REMAPPING_H
+#define _DMA_REMAPPING_H
+
+/*
+ * We need a fixed PAGE_SIZE of 4K irrespective of
+ * arch PAGE_SIZE for IOMMU page tables.
+ */
+#define PAGE_SHIFT_4K		(12)
+#define PAGE_SIZE_4K		(1UL << PAGE_SHIFT_4K)
+#define PAGE_MASK_4K		(((u64)-1) << PAGE_SHIFT_4K)
+#define PAGE_ALIGN_4K(addr)	(((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
+
+#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT_4K)
+#define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
+#define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)
+
+
+/*
+ * 0: Present
+ * 1-11: Reserved
+ * 12-63: Context Ptr (12 - (haw-1))
+ * 64-127: Reserved
+ */
+struct root_entry {
+	u64	val;
+	u64	rsvd1;
+};
+#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
+static inline bool root_present(struct root_entry *root)
+{
+	return (root->val & 1);
+}
+static inline void set_root_present(struct root_entry *root)
+{
+	root->val |= 1;
+}
+static inline void set_root_value(struct root_entry *root, unsigned long value)
+{
+	root->val |= value & PAGE_MASK_4K;
+}
+
+struct context_entry;
+static inline struct context_entry *
+get_context_addr_from_root(struct root_entry *root)
+{
+	return (struct context_entry *)
+		(root_present(root)?phys_to_virt(
+		root->val & PAGE_MASK_4K):
+		NULL);
+}
+
+/*
+ * low 64 bits:
+ * 0: present
+ * 1: fault processing disable
+ * 2-3: translation type
+ * 12-63: address space root
+ * high 64 bits:
+ * 0-2: address width
+ * 3-6: aval
+ * 8-23: domain id
+ */
+struct context_entry {
+	u64 lo;
+	u64 hi;
+};
+#define context_present(c) ((c).lo & 1)
+#define context_fault_disable(c) (((c).lo >> 1) & 1)
+#define context_translation_type(c) (((c).lo >> 2) & 3)
+#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
+#define context_address_width(c) ((c).hi & 7)
+#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
+
+#define context_set_present(c) do {(c).lo |= 1;} while (0)
+#define context_set_fault_enable(c) \
+	do {(c).lo &= (((u64)-1) << 2) | 1;} while (0)
+#define context_set_translation_type(c, val) \
+	do { \
+		(c).lo &= (((u64)-1) << 4) | 3; \
+		(c).lo |= ((val) & 3) << 2; \
+	} while (0)
+#define CONTEXT_TT_MULTI_LEVEL	0
+#define context_set_address_root(c, val) \
+	do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
+#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
+#define context_set_domain_id(c, val) \
+	do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
+#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0)
+
+/*
+ * 0: readable
+ * 1: writable
+ * 2-6: reserved
+ * 7: super page
+ * 8-11: available
+ * 12-63: Host physical address
+ */
+struct dma_pte {
+	u64 val;
+};
+#define dma_clear_pte(p)	do {(p).val = 0;} while (0)
+
+#define DMA_PTE_READ (1)
+#define DMA_PTE_WRITE (2)
+
+#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
+#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
+#define dma_set_pte_prot(p, prot) \
+		do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
+#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
+#define dma_set_pte_addr(p, addr) do {\
+		(p).val |= ((addr) & PAGE_MASK_4K); } while (0)
+#define dma_pte_present(p) (((p).val & 3) != 0)
+
+struct intel_iommu;
+
+struct dmar_domain {
+	int	id;			/* domain id */
+	struct intel_iommu *iommu;	/* back pointer to owning iommu */
+
+	struct list_head devices;	/* all devices' list */
+	struct iova_domain iovad;	/* iova's that belong to this domain */
+
+	struct dma_pte	*pgd;		/* virtual address */
+	spinlock_t	mapping_lock;	/* page table lock */
+	int		gaw;		/* max guest address width */
+
+	/* adjusted guest address width, 0 is level 2 30-bit */
+	int		agaw;
+
+#define DOMAIN_FLAG_MULTIPLE_DEVICES 1
+	int		flags;
+};
+
+/* PCI domain-device relationship */
+struct device_domain_info {
+	struct list_head link;	/* link to domain siblings */
+	struct list_head global; /* link to global list */
+	u8 bus;			/* PCI bus number */
+	u8 devfn;		/* PCI devfn number */
+	struct pci_dev *dev;	/* it's NULL for PCIE-to-PCI bridge */
+	struct dmar_domain *domain; /* pointer to domain */
+};
+
+extern int init_dmars(void);
+extern void free_dmar_iommu(struct intel_iommu *iommu);
+
+#ifndef CONFIG_DMAR_GFX_WA
+static inline void iommu_prepare_gfx_mapping(void)
+{
+	return;
+}
+#endif /* !CONFIG_DMAR_GFX_WA */
+
+#endif
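The context_set_*() helpers above are written to compose when a device is bound
to a domain; intel-iommu.c does exactly this in its context-mapping path. A
minimal sketch of that composition, assuming the caller already holds the
iommu lock and has allocated domain->pgd (the variable names here are
illustrative, not part of this patch):

	struct context_entry *context;	/* the device's slot, indexed by devfn */

	context_clear_entry(*context);
	context_set_domain_id(*context, domain->id);
	context_set_address_width(*context, domain->agaw);
	context_set_address_root(*context, virt_to_phys(domain->pgd));
	context_set_translation_type(*context, CONTEXT_TT_MULTI_LEVEL);
	context_set_fault_enable(*context);
	context_set_present(*context);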
diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
index f941f609dbf3..c00e387f5b75 100644
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c
@@ -19,9 +19,11 @@
  * Author: Shaohua Li <shaohua.li@intel.com>
  * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
  *
- * This file implements early detection/parsing of DMA Remapping Devices
+ * This file implements early detection/parsing of Remapping Devices
  * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
  * tables.
+ *
+ * These routines are used by both DMA-remapping and Interrupt-remapping
  */
 
 #include <linux/pci.h>
@@ -300,6 +302,37 @@ parse_dmar_table(void)
 	return ret;
 }
 
+int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
+			  struct pci_dev *dev)
+{
+	int index;
+
+	while (dev) {
+		for (index = 0; index < cnt; index++)
+			if (dev == devices[index])
+				return 1;
+
+		/* Check our parent */
+		dev = dev->bus->self;
+	}
+
+	return 0;
+}
+
+struct dmar_drhd_unit *
+dmar_find_matched_drhd_unit(struct pci_dev *dev)
+{
+	struct dmar_drhd_unit *drhd = NULL;
+
+	list_for_each_entry(drhd, &dmar_drhd_units, list) {
+		if (drhd->include_all || dmar_pci_device_match(drhd->devices,
+						drhd->devices_cnt, dev))
+			return drhd;
+	}
+
+	return NULL;
+}
+
 
 int __init dmar_table_init(void)
 {
@@ -343,3 +376,58 @@ int __init early_dmar_detect(void)
 
 	return (ACPI_SUCCESS(status) ? 1 : 0);
 }
+
+struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
+				struct dmar_drhd_unit *drhd)
+{
+	int map_size;
+	u32 ver;
+
+	iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
+	if (!iommu->reg) {
+		printk(KERN_ERR "IOMMU: can't map the region\n");
+		goto error;
+	}
+	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
+	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+
+	/* the registers might be more than one page */
+	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
+		cap_max_fault_reg_offset(iommu->cap));
+	map_size = PAGE_ALIGN_4K(map_size);
+	if (map_size > PAGE_SIZE_4K) {
+		iounmap(iommu->reg);
+		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
+		if (!iommu->reg) {
+			printk(KERN_ERR "IOMMU: can't map the region\n");
+			goto error;
+		}
+	}
+
+	ver = readl(iommu->reg + DMAR_VER_REG);
+	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+		drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
+		iommu->cap, iommu->ecap);
+
+	spin_lock_init(&iommu->register_lock);
+
+	drhd->iommu = iommu;
+	return iommu;
+error:
+	kfree(iommu);
+	return NULL;
+}
+
+void free_iommu(struct intel_iommu *iommu)
+{
+	if (!iommu)
+		return;
+
+#ifdef CONFIG_DMAR
+	free_dmar_iommu(iommu);
+#endif
+
+	if (iommu->reg)
+		iounmap(iommu->reg);
+	kfree(iommu);
+}
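Note that dmar_pci_device_match() walks from the device up through
dev->bus->self, so a device behind a PCIe-to-PCI bridge matches a DRHD unit
whose scope lists only the bridge. A short usage sketch of the newly exported
lookup (pdev and the error handling are illustrative):

	struct dmar_drhd_unit *drhd;
	struct intel_iommu *iommu;

	/* matches an explicit device scope, or an include_all unit */
	drhd = dmar_find_matched_drhd_unit(pdev);
	if (!drhd)
		return -ENODEV;
	iommu = drhd->iommu;	/* populated by alloc_iommu() during init */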
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index bb0642318a95..1c0270d3e2e5 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -990,6 +990,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 		return -ENOMEM;
 	}
 
+	spin_lock_init(&iommu->lock);
+
 	/*
 	 * if Caching mode is set, then invalid translations are tagged
 	 * with domainid 0. Hence we need to pre-allocate it.
@@ -998,62 +1000,15 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	set_bit(0, iommu->domain_ids);
 	return 0;
 }
-static struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
-					struct dmar_drhd_unit *drhd)
-{
-	int ret;
-	int map_size;
-	u32 ver;
-
-	iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K);
-	if (!iommu->reg) {
-		printk(KERN_ERR "IOMMU: can't map the region\n");
-		goto error;
-	}
-	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
-	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
-
-	/* the registers might be more than one page */
-	map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
-		cap_max_fault_reg_offset(iommu->cap));
-	map_size = PAGE_ALIGN_4K(map_size);
-	if (map_size > PAGE_SIZE_4K) {
-		iounmap(iommu->reg);
-		iommu->reg = ioremap(drhd->reg_base_addr, map_size);
-		if (!iommu->reg) {
-			printk(KERN_ERR "IOMMU: can't map the region\n");
-			goto error;
-		}
-	}
 
-	ver = readl(iommu->reg + DMAR_VER_REG);
-	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
-		drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
-		iommu->cap, iommu->ecap);
-	ret = iommu_init_domains(iommu);
-	if (ret)
-		goto error_unmap;
-	spin_lock_init(&iommu->lock);
-	spin_lock_init(&iommu->register_lock);
-
-	drhd->iommu = iommu;
-	return iommu;
-error_unmap:
-	iounmap(iommu->reg);
-error:
-	kfree(iommu);
-	return NULL;
-}
 
 static void domain_exit(struct dmar_domain *domain);
-static void free_iommu(struct intel_iommu *iommu)
+
+void free_dmar_iommu(struct intel_iommu *iommu)
 {
 	struct dmar_domain *domain;
 	int i;
 
-	if (!iommu)
-		return;
-
 	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
 	for (; i < cap_ndoms(iommu->cap); ) {
 		domain = iommu->domains[i];
@@ -1078,10 +1033,6 @@ static void free_iommu(struct intel_iommu *iommu)
 
 	/* free context mapping */
 	free_context_table(iommu);
-
-	if (iommu->reg)
-		iounmap(iommu->reg);
-	kfree(iommu);
 }
 
 static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
@@ -1426,37 +1377,6 @@ find_domain(struct pci_dev *pdev)
 	return NULL;
 }
 
-static int dmar_pci_device_match(struct pci_dev *devices[], int cnt,
-			  struct pci_dev *dev)
-{
-	int index;
-
-	while (dev) {
-		for (index = 0; index < cnt; index++)
-			if (dev == devices[index])
-				return 1;
-
-		/* Check our parent */
-		dev = dev->bus->self;
-	}
-
-	return 0;
-}
-
-static struct dmar_drhd_unit *
-dmar_find_matched_drhd_unit(struct pci_dev *dev)
-{
-	struct dmar_drhd_unit *drhd = NULL;
-
-	list_for_each_entry(drhd, &dmar_drhd_units, list) {
-		if (drhd->include_all || dmar_pci_device_match(drhd->devices,
-						drhd->devices_cnt, dev))
-			return drhd;
-	}
-
-	return NULL;
-}
-
 /* domain is initialized */
 static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
 {
@@ -1764,6 +1684,10 @@ int __init init_dmars(void)
 		goto error;
 	}
 
+	ret = iommu_init_domains(iommu);
+	if (ret)
+		goto error;
+
 	/*
 	 * TBD:
 	 * we could share the same root & context tables
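The net effect in intel-iommu.c: alloc_iommu(), now the generic version living
in dmar.c, no longer initializes the domain bookkeeping; init_dmars() does so
explicitly once the generic allocation succeeds. A sketch of the resulting
per-unit bring-up order (the loop body is condensed and allocation of the
struct itself is elided; variable names are illustrative):

	list_for_each_entry(drhd, &dmar_drhd_units, list) {
		/* generic part (dmar.c): map registers, read cap/ecap */
		iommu = alloc_iommu(iommu, drhd);
		if (!iommu) {
			ret = -ENOMEM;
			goto error;
		}
		/* DMA-remapping specific part, moved out of alloc_iommu() */
		ret = iommu_init_domains(iommu);
		if (ret)
			goto error;
		/* ... root entry and context table setup follow ... */
	}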
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h
index afc0ad96122e..9e5e98c76c05 100644
--- a/drivers/pci/intel-iommu.h
+++ b/drivers/pci/intel-iommu.h
@@ -27,19 +27,7 @@
 #include <linux/sysdev.h>
 #include "iova.h"
 #include <linux/io.h>
-
-/*
- * We need a fixed PAGE_SIZE of 4K irrespective of
- * arch PAGE_SIZE for IOMMU page tables.
- */
-#define PAGE_SHIFT_4K		(12)
-#define PAGE_SIZE_4K		(1UL << PAGE_SHIFT_4K)
-#define PAGE_MASK_4K		(((u64)-1) << PAGE_SHIFT_4K)
-#define PAGE_ALIGN_4K(addr)	(((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K)
-
-#define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT_4K)
-#define DMA_32BIT_PFN		IOVA_PFN(DMA_32BIT_MASK)
-#define DMA_64BIT_PFN		IOVA_PFN(DMA_64BIT_MASK)
+#include "dma_remapping.h"
 
 /*
  * Intel IOMMU register specification per version 1.0 public spec.
@@ -187,158 +175,31 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define dma_frcd_source_id(c) (c & 0xffff)
 #define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */
 
-/*
- * 0: Present
- * 1-11: Reserved
- * 12-63: Context Ptr (12 - (haw-1))
- * 64-127: Reserved
- */
-struct root_entry {
-	u64	val;
-	u64	rsvd1;
-};
-#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry))
-static inline bool root_present(struct root_entry *root)
-{
-	return (root->val & 1);
-}
-static inline void set_root_present(struct root_entry *root)
-{
-	root->val |= 1;
-}
-static inline void set_root_value(struct root_entry *root, unsigned long value)
-{
-	root->val |= value & PAGE_MASK_4K;
-}
-
-struct context_entry;
-static inline struct context_entry *
-get_context_addr_from_root(struct root_entry *root)
-{
-	return (struct context_entry *)
-		(root_present(root)?phys_to_virt(
-		root->val & PAGE_MASK_4K):
-		NULL);
-}
-
-/*
- * low 64 bits:
- * 0: present
- * 1: fault processing disable
- * 2-3: translation type
- * 12-63: address space root
- * high 64 bits:
- * 0-2: address width
- * 3-6: aval
- * 8-23: domain id
- */
-struct context_entry {
-	u64 lo;
-	u64 hi;
-};
-#define context_present(c) ((c).lo & 1)
-#define context_fault_disable(c) (((c).lo >> 1) & 1)
-#define context_translation_type(c) (((c).lo >> 2) & 3)
-#define context_address_root(c) ((c).lo & PAGE_MASK_4K)
-#define context_address_width(c) ((c).hi & 7)
-#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1))
-
-#define context_set_present(c) do {(c).lo |= 1;} while (0)
-#define context_set_fault_enable(c) \
-	do {(c).lo &= (((u64)-1) << 2) | 1;} while (0)
-#define context_set_translation_type(c, val) \
-	do { \
-		(c).lo &= (((u64)-1) << 4) | 3; \
-		(c).lo |= ((val) & 3) << 2; \
-	} while (0)
-#define CONTEXT_TT_MULTI_LEVEL	0
-#define context_set_address_root(c, val) \
-	do {(c).lo |= (val) & PAGE_MASK_4K;} while (0)
-#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0)
-#define context_set_domain_id(c, val) \
-	do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0)
-#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0)
-
-/*
- * 0: readable
- * 1: writable
- * 2-6: reserved
- * 7: super page
- * 8-11: available
- * 12-63: Host physical address
- */
-struct dma_pte {
-	u64 val;
-};
-#define dma_clear_pte(p)	do {(p).val = 0;} while (0)
-
-#define DMA_PTE_READ (1)
-#define DMA_PTE_WRITE (2)
-
-#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0)
-#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0)
-#define dma_set_pte_prot(p, prot) \
-		do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0)
-#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K)
-#define dma_set_pte_addr(p, addr) do {\
-		(p).val |= ((addr) & PAGE_MASK_4K); } while (0)
-#define dma_pte_present(p) (((p).val & 3) != 0)
-
-struct intel_iommu;
-
-struct dmar_domain {
-	int	id;			/* domain id */
-	struct intel_iommu *iommu;	/* back pointer to owning iommu */
-
-	struct list_head devices;	/* all devices' list */
-	struct iova_domain iovad;	/* iova's that belong to this domain */
-
-	struct dma_pte	*pgd;		/* virtual address */
-	spinlock_t	mapping_lock;	/* page table lock */
-	int		gaw;		/* max guest address width */
-
-	/* adjusted guest address width, 0 is level 2 30-bit */
-	int		agaw;
-
-#define DOMAIN_FLAG_MULTIPLE_DEVICES 1
-	int		flags;
-};
-
-/* PCI domain-device relationship */
-struct device_domain_info {
-	struct list_head link;	/* link to domain siblings */
-	struct list_head global; /* link to global list */
-	u8 bus;			/* PCI bus number */
-	u8 devfn;		/* PCI devfn number */
-	struct pci_dev *dev;	/* it's NULL for PCIE-to-PCI bridge */
-	struct dmar_domain *domain; /* pointer to domain */
-};
-
-extern int init_dmars(void);
-
 struct intel_iommu {
 	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
 	u64		cap;
 	u64		ecap;
-	unsigned long	*domain_ids; /* bitmap of domains */
-	struct dmar_domain **domains; /* ptr to domains */
 	int		seg;
 	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
-	spinlock_t	lock; /* protect context, domain ids */
 	spinlock_t	register_lock; /* protect register handling */
+
+#ifdef CONFIG_DMAR
+	unsigned long	*domain_ids; /* bitmap of domains */
+	struct dmar_domain **domains; /* ptr to domains */
+	spinlock_t	lock; /* protect context, domain ids */
 	struct root_entry *root_entry; /* virtual address */
 
 	unsigned int irq;
 	unsigned char name[7];    /* Device Name */
 	struct msi_msg saved_msg;
 	struct sys_device sysdev;
+#endif
 };
 
-#ifndef CONFIG_DMAR_GFX_WA
-static inline void iommu_prepare_gfx_mapping(void)
-{
-	return;
-}
-#endif /* !CONFIG_DMAR_GFX_WA */
+extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
+
+extern struct intel_iommu *alloc_iommu(struct intel_iommu *iommu,
+				struct dmar_drhd_unit *drhd);
+extern void free_iommu(struct intel_iommu *iommu);
 
 #endif
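With the DMA-remapping-only members fenced by CONFIG_DMAR, the follow-on
interrupt-remapping patches in this series can reuse struct intel_iommu plus
alloc_iommu()/free_iommu() without dragging in the DMA-remapping state. A
hypothetical consumer, sketched under that assumption (drhd lookup and error
paths condensed):

	struct intel_iommu *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);

	if (!iommu)
		return -ENOMEM;
	/* maps regs, reads cap/ecap; alloc_iommu() frees iommu on failure */
	if (!alloc_iommu(iommu, drhd))
		return -ENODEV;
	/* ... program interrupt-remapping hardware via iommu->reg ... */
	free_iommu(iommu);	/* unmaps regs; tears down domain state
				   only when CONFIG_DMAR is enabled */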