path: root/drivers/iommu
author    Linus Torvalds <torvalds@linux-foundation.org>  2014-01-29 23:00:13 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-01-29 23:00:13 -0500
commit    b3a4bcaa5a56860610bd096829702f80273b5a67 (patch)
tree      57a37e12d5fc8be6540e9f98cd381f6fb5e06654 /drivers/iommu
parent    17c7f85460d6b0e2bd11a736683bd81c4388474f (diff)
parent    dd1a175695edf662615e422d1c85eae875a411b2 (diff)
Merge tag 'iommu-updates-v3.14' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU Updates from Joerg Roedel:
 "A few patches have been queued up for this merge window:

   - improvements for the ARM-SMMU driver (IOMMU_EXEC support, IOMMU
     group support)

   - updates and fixes for the shmobile IOMMU driver

   - various fixes to generic IOMMU code and the Intel IOMMU driver

   - some cleanups in IOMMU drivers (dev_is_pci() usage)"

* tag 'iommu-updates-v3.14' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (36 commits)
  iommu/vt-d: Fix signedness bug in alloc_irte()
  iommu/vt-d: free all resources if failed to initialize DMARs
  iommu/vt-d, trivial: clean sparse warnings
  iommu/vt-d: fix wrong return value of dmar_table_init()
  iommu/vt-d: release invalidation queue when destroying IOMMU unit
  iommu/vt-d: fix access after free issue in function free_dmar_iommu()
  iommu/vt-d: keep shared resources when failed to initialize iommu devices
  iommu/vt-d: fix invalid memory access when freeing DMAR irq
  iommu/vt-d, trivial: simplify code with existing macros
  iommu/vt-d, trivial: use defined macro instead of hardcoding
  iommu/vt-d: mark internal functions as static
  iommu/vt-d, trivial: clean up unused code
  iommu/vt-d, trivial: check suitable flag in function detect_intel_iommu()
  iommu/vt-d, trivial: print correct domain id of static identity domain
  iommu/vt-d, trivial: refine support of 64bit guest address
  iommu/vt-d: fix resource leakage on error recovery path in iommu_init_domains()
  iommu/vt-d: fix a race window in allocating domain ID for virtual machines
  iommu/vt-d: fix PCI device reference leakage on error recovery path
  drm/msm: Fix link error with !MSM_IOMMU
  iommu/vt-d: use dedicated bitmap to track remapping entry allocation status
  ...
Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/Kconfig                |    1
-rw-r--r--  drivers/iommu/amd_iommu.c            |    4
-rw-r--r--  drivers/iommu/arm-smmu.c             |   33
-rw-r--r--  drivers/iommu/dmar.c                 |  135
-rw-r--r--  drivers/iommu/fsl_pamu_domain.c      |    6
-rw-r--r--  drivers/iommu/intel-iommu.c          |  216
-rw-r--r--  drivers/iommu/intel_irq_remapping.c  |  105
-rw-r--r--  drivers/iommu/irq_remapping.c        |    6
-rw-r--r--  drivers/iommu/of_iommu.c             |    1
-rw-r--r--  drivers/iommu/shmobile-iommu.c       |    3
-rw-r--r--  drivers/iommu/shmobile-ipmmu.c       |   10
-rw-r--r--  drivers/iommu/shmobile-ipmmu.h       |    2
12 files changed, 258 insertions, 264 deletions
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 3e7fdbb4916b..79bbc21c1d01 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -207,6 +207,7 @@ config SHMOBILE_IOMMU
207 bool "IOMMU for Renesas IPMMU/IPMMUI" 207 bool "IOMMU for Renesas IPMMU/IPMMUI"
208 default n 208 default n
209 depends on ARM 209 depends on ARM
210 depends on SH_MOBILE || COMPILE_TEST
210 select IOMMU_API 211 select IOMMU_API
211 select ARM_DMA_USE_IOMMU 212 select ARM_DMA_USE_IOMMU
212 select SHMOBILE_IPMMU 213 select SHMOBILE_IPMMU
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 72531f008a5e..faf0da4bb3a2 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -248,8 +248,8 @@ static bool check_device(struct device *dev)
 	if (!dev || !dev->dma_mask)
 		return false;
 
-	/* No device or no PCI device */
-	if (dev->bus != &pci_bus_type)
+	/* No PCI device */
+	if (!dev_is_pci(dev))
 		return false;
 
 	devid = get_device_id(dev);
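The dev_is_pci() conversions here and in fsl_pamu_domain.c and intel-iommu.c below swap an open-coded bus comparison for a generic helper. As a rough sketch (the helper ships with the PCI headers; this reproduction is from memory and only illustrative):

	/* Roughly what the helper amounts to: true iff the device sits on the PCI bus. */
	#define dev_is_pci(d)	((d)->bus == &pci_bus_type)

	/* Call sites then read as a predicate plus the usual container conversion: */
	if (!dev_is_pci(dev))
		return false;		/* not a PCI device, nothing to do */
	pdev = to_pci_dev(dev);		/* safe now: the device really is a struct pci_dev */

Behaviour is unchanged; the point of these cleanups is purely readability and consistency across IOMMU drivers.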
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e46a88700b68..8911850c9444 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -24,7 +24,7 @@
  * - v7/v8 long-descriptor format
  * - Non-secure access to the SMMU
  * - 4k and 64k pages, with contiguous pte hints.
- * - Up to 39-bit addressing
+ * - Up to 42-bit addressing (dependent on VA_BITS)
  * - Context fault reporting
  */
 
@@ -61,12 +61,13 @@
 #define ARM_SMMU_GR1(smmu)		((smmu)->base + (smmu)->pagesize)
 
 /* Page table bits */
-#define ARM_SMMU_PTE_PAGE		(((pteval_t)3) << 0)
+#define ARM_SMMU_PTE_XN			(((pteval_t)3) << 53)
 #define ARM_SMMU_PTE_CONT		(((pteval_t)1) << 52)
 #define ARM_SMMU_PTE_AF			(((pteval_t)1) << 10)
 #define ARM_SMMU_PTE_SH_NS		(((pteval_t)0) << 8)
 #define ARM_SMMU_PTE_SH_OS		(((pteval_t)2) << 8)
 #define ARM_SMMU_PTE_SH_IS		(((pteval_t)3) << 8)
+#define ARM_SMMU_PTE_PAGE		(((pteval_t)3) << 0)
 
 #if PAGE_SIZE == SZ_4K
 #define ARM_SMMU_PTE_CONT_ENTRIES	16
@@ -1205,7 +1206,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 				   unsigned long pfn, int flags, int stage)
 {
 	pte_t *pte, *start;
-	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
+	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
 
 	if (pmd_none(*pmd)) {
 		/* Allocate a new set of tables */
@@ -1244,7 +1245,9 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 	}
 
 	/* If no access, create a faulting entry to avoid TLB fills */
-	if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
+	if (flags & IOMMU_EXEC)
+		pteval &= ~ARM_SMMU_PTE_XN;
+	else if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
 		pteval &= ~ARM_SMMU_PTE_PAGE;
 
 	pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1494,6 +1497,13 @@ static int arm_smmu_add_device(struct device *dev)
 {
 	struct arm_smmu_device *child, *parent, *smmu;
 	struct arm_smmu_master *master = NULL;
+	struct iommu_group *group;
+	int ret;
+
+	if (dev->archdata.iommu) {
+		dev_warn(dev, "IOMMU driver already assigned to device\n");
+		return -EINVAL;
+	}
 
 	spin_lock(&arm_smmu_devices_lock);
 	list_for_each_entry(parent, &arm_smmu_devices, list) {
@@ -1526,13 +1536,23 @@ static int arm_smmu_add_device(struct device *dev)
 	if (!master)
 		return -ENODEV;
 
+	group = iommu_group_alloc();
+	if (IS_ERR(group)) {
+		dev_err(dev, "Failed to allocate IOMMU group\n");
+		return PTR_ERR(group);
+	}
+
+	ret = iommu_group_add_device(group, dev);
+	iommu_group_put(group);
 	dev->archdata.iommu = smmu;
-	return 0;
+
+	return ret;
 }
 
 static void arm_smmu_remove_device(struct device *dev)
 {
 	dev->archdata.iommu = NULL;
+	iommu_group_remove_device(dev);
 }
 
 static struct iommu_ops arm_smmu_ops = {
@@ -1730,7 +1750,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	 * allocation (PTRS_PER_PGD).
 	 */
 #ifdef CONFIG_64BIT
-	/* Current maximum output size of 39 bits */
 	smmu->s1_output_size = min(39UL, size);
 #else
 	smmu->s1_output_size = min(32UL, size);
@@ -1745,7 +1764,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 	} else {
 #ifdef CONFIG_64BIT
 		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
-		size = min(39, arm_smmu_id_size_to_bits(size));
+		size = min(VA_BITS, arm_smmu_id_size_to_bits(size));
 #else
 		size = 32;
 #endif
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 8b452c9676d9..158156543410 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -52,6 +52,9 @@ LIST_HEAD(dmar_drhd_units);
 struct acpi_table_header * __initdata dmar_tbl;
 static acpi_size dmar_tbl_size;
 
+static int alloc_iommu(struct dmar_drhd_unit *drhd);
+static void free_iommu(struct intel_iommu *iommu);
+
 static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
 {
 	/*
@@ -100,7 +103,6 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
 	if (!pdev) {
 		pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
 			segment, scope->bus, path->device, path->function);
-		*dev = NULL;
 		return 0;
 	}
 	if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT && \
@@ -151,7 +153,7 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
 			ret = dmar_parse_one_dev_scope(scope,
 				&(*devices)[index], segment);
 			if (ret) {
-				kfree(*devices);
+				dmar_free_dev_scope(devices, cnt);
 				return ret;
 			}
 			index ++;
@@ -162,6 +164,17 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
 	return 0;
 }
 
+void dmar_free_dev_scope(struct pci_dev ***devices, int *cnt)
+{
+	if (*devices && *cnt) {
+		while (--*cnt >= 0)
+			pci_dev_put((*devices)[*cnt]);
+		kfree(*devices);
+		*devices = NULL;
+		*cnt = 0;
+	}
+}
+
 /**
  * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
  * structure which uniquely represent one DMA remapping hardware unit
@@ -193,25 +206,28 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
 	return 0;
 }
 
+static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
+{
+	if (dmaru->devices && dmaru->devices_cnt)
+		dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
+	if (dmaru->iommu)
+		free_iommu(dmaru->iommu);
+	kfree(dmaru);
+}
+
 static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru)
 {
 	struct acpi_dmar_hardware_unit *drhd;
-	int ret = 0;
 
 	drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr;
 
 	if (dmaru->include_all)
 		return 0;
 
-	ret = dmar_parse_dev_scope((void *)(drhd + 1),
+	return dmar_parse_dev_scope((void *)(drhd + 1),
 			((void *)drhd) + drhd->header.length,
 			&dmaru->devices_cnt, &dmaru->devices,
 			drhd->segment);
-	if (ret) {
-		list_del(&dmaru->list);
-		kfree(dmaru);
-	}
-	return ret;
 }
 
 #ifdef CONFIG_ACPI_NUMA
@@ -423,7 +439,7 @@ dmar_find_matched_drhd_unit(struct pci_dev *dev)
 int __init dmar_dev_scope_init(void)
 {
 	static int dmar_dev_scope_initialized;
-	struct dmar_drhd_unit *drhd, *drhd_n;
+	struct dmar_drhd_unit *drhd;
 	int ret = -ENODEV;
 
 	if (dmar_dev_scope_initialized)
@@ -432,7 +448,7 @@ int __init dmar_dev_scope_init(void)
 	if (list_empty(&dmar_drhd_units))
 		goto fail;
 
-	list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) {
+	list_for_each_entry(drhd, &dmar_drhd_units, list) {
 		ret = dmar_parse_dev(drhd);
 		if (ret)
 			goto fail;
@@ -456,24 +472,23 @@ int __init dmar_table_init(void)
 	static int dmar_table_initialized;
 	int ret;
 
-	if (dmar_table_initialized)
-		return 0;
-
-	dmar_table_initialized = 1;
-
-	ret = parse_dmar_table();
-	if (ret) {
-		if (ret != -ENODEV)
-			pr_info("parse DMAR table failure.\n");
-		return ret;
-	}
+	if (dmar_table_initialized == 0) {
+		ret = parse_dmar_table();
+		if (ret < 0) {
+			if (ret != -ENODEV)
+				pr_info("parse DMAR table failure.\n");
+		} else if (list_empty(&dmar_drhd_units)) {
+			pr_info("No DMAR devices found\n");
+			ret = -ENODEV;
+		}
 
-	if (list_empty(&dmar_drhd_units)) {
-		pr_info("No DMAR devices found\n");
-		return -ENODEV;
+		if (ret < 0)
+			dmar_table_initialized = ret;
+		else
+			dmar_table_initialized = 1;
 	}
 
-	return 0;
+	return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
 }
 
 static void warn_invalid_dmar(u64 addr, const char *message)
@@ -488,7 +503,7 @@ static void warn_invalid_dmar(u64 addr, const char *message)
 		      dmi_get_system_info(DMI_PRODUCT_VERSION));
 }
 
-int __init check_zero_address(void)
+static int __init check_zero_address(void)
 {
 	struct acpi_table_dmar *dmar;
 	struct acpi_dmar_header *entry_header;
@@ -546,14 +561,6 @@ int __init detect_intel_iommu(void)
 	if (ret)
 		ret = check_zero_address();
 	{
-		struct acpi_table_dmar *dmar;
-
-		dmar = (struct acpi_table_dmar *) dmar_tbl;
-
-		if (ret && irq_remapping_enabled && cpu_has_x2apic &&
-		    dmar->flags & 0x1)
-			pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
-
 		if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
 			iommu_detected = 1;
 			/* Make sure ACS will be enabled */
@@ -565,7 +572,7 @@ int __init detect_intel_iommu(void)
 			x86_init.iommu.iommu_init = intel_iommu_init;
 #endif
 	}
-	early_acpi_os_unmap_memory(dmar_tbl, dmar_tbl_size);
+	early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
 	dmar_tbl = NULL;
 
 	return ret ? 1 : -ENODEV;
@@ -647,7 +654,7 @@ out:
 	return err;
 }
 
-int alloc_iommu(struct dmar_drhd_unit *drhd)
+static int alloc_iommu(struct dmar_drhd_unit *drhd)
 {
 	struct intel_iommu *iommu;
 	u32 ver, sts;
@@ -721,12 +728,19 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	return err;
 }
 
-void free_iommu(struct intel_iommu *iommu)
+static void free_iommu(struct intel_iommu *iommu)
 {
-	if (!iommu)
-		return;
+	if (iommu->irq) {
+		free_irq(iommu->irq, iommu);
+		irq_set_handler_data(iommu->irq, NULL);
+		destroy_irq(iommu->irq);
+	}
 
-	free_dmar_iommu(iommu);
+	if (iommu->qi) {
+		free_page((unsigned long)iommu->qi->desc);
+		kfree(iommu->qi->desc_status);
+		kfree(iommu->qi);
+	}
 
 	if (iommu->reg)
 		unmap_iommu(iommu);
@@ -1050,7 +1064,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 	desc_page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0);
 	if (!desc_page) {
 		kfree(qi);
-		iommu->qi = 0;
+		iommu->qi = NULL;
 		return -ENOMEM;
 	}
 
@@ -1060,7 +1074,7 @@ int dmar_enable_qi(struct intel_iommu *iommu)
 	if (!qi->desc_status) {
 		free_page((unsigned long) qi->desc);
 		kfree(qi);
-		iommu->qi = 0;
+		iommu->qi = NULL;
 		return -ENOMEM;
 	}
 
@@ -1111,9 +1125,7 @@ static const char *irq_remap_fault_reasons[] =
1111 "Blocked an interrupt request due to source-id verification failure", 1125 "Blocked an interrupt request due to source-id verification failure",
1112}; 1126};
1113 1127
1114#define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1) 1128static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1115
1116const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type)
1117{ 1129{
1118 if (fault_reason >= 0x20 && (fault_reason - 0x20 < 1130 if (fault_reason >= 0x20 && (fault_reason - 0x20 <
1119 ARRAY_SIZE(irq_remap_fault_reasons))) { 1131 ARRAY_SIZE(irq_remap_fault_reasons))) {
@@ -1303,15 +1315,14 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
 int __init enable_drhd_fault_handling(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 
 	/*
 	 * Enable fault control interrupt.
 	 */
-	for_each_drhd_unit(drhd) {
-		int ret;
-		struct intel_iommu *iommu = drhd->iommu;
+	for_each_iommu(iommu, drhd) {
 		u32 fault_status;
-		ret = dmar_set_interrupt(iommu);
+		int ret = dmar_set_interrupt(iommu);
 
 		if (ret) {
 			pr_err("DRHD %Lx: failed to enable fault, interrupt, ret %d\n",
@@ -1366,4 +1377,22 @@ int __init dmar_ir_support(void)
 		return 0;
 	return dmar->flags & 0x1;
 }
+
+static int __init dmar_free_unused_resources(void)
+{
+	struct dmar_drhd_unit *dmaru, *dmaru_n;
+
+	/* DMAR units are in use */
+	if (irq_remapping_enabled || intel_iommu_enabled)
+		return 0;
+
+	list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) {
+		list_del(&dmaru->list);
+		dmar_free_drhd(dmaru);
+	}
+
+	return 0;
+}
+
+late_initcall(dmar_free_unused_resources);
 IOMMU_INIT_POST(detect_intel_iommu);
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index c857c30da979..93072ba44b1d 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -691,7 +691,7 @@ static int fsl_pamu_attach_device(struct iommu_domain *domain,
 	 * Use LIODN of the PCI controller while attaching a
 	 * PCI device.
 	 */
-	if (dev->bus == &pci_bus_type) {
+	if (dev_is_pci(dev)) {
 		pdev = to_pci_dev(dev);
 		pci_ctl = pci_bus_to_host(pdev->bus);
 		/*
@@ -729,7 +729,7 @@ static void fsl_pamu_detach_device(struct iommu_domain *domain,
 	 * Use LIODN of the PCI controller while detaching a
 	 * PCI device.
 	 */
-	if (dev->bus == &pci_bus_type) {
+	if (dev_is_pci(dev)) {
 		pdev = to_pci_dev(dev);
 		pci_ctl = pci_bus_to_host(pdev->bus);
 		/*
@@ -1056,7 +1056,7 @@ static int fsl_pamu_add_device(struct device *dev)
 	 * For platform devices we allocate a separate group for
 	 * each of the devices.
 	 */
-	if (dev->bus == &pci_bus_type) {
+	if (dev_is_pci(dev)) {
 		pdev = to_pci_dev(dev);
 		/* Don't create device groups for virtual PCI bridges */
 		if (pdev->subordinate)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 59779e19315e..a22c86c867fa 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -63,6 +63,7 @@
 #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48
 
 #define MAX_AGAW_WIDTH 64
+#define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
 
 #define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
 #define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
@@ -106,12 +107,12 @@ static inline int agaw_to_level(int agaw)
 
 static inline int agaw_to_width(int agaw)
 {
-	return 30 + agaw * LEVEL_STRIDE;
+	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
 }
 
 static inline int width_to_agaw(int width)
 {
-	return (width - 30) / LEVEL_STRIDE;
+	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
 }
 
 static inline unsigned int level_to_offset_bits(int level)
@@ -141,7 +142,7 @@ static inline unsigned long align_to_level(unsigned long pfn, int level)
 
 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
 {
-	return 1 << ((lvl - 1) * LEVEL_STRIDE);
+	return 1 << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
 }
 
 /* VT-d pages must always be _smaller_ than MM pages. Otherwise things
@@ -288,26 +289,6 @@ static inline void dma_clear_pte(struct dma_pte *pte)
 	pte->val = 0;
 }
 
-static inline void dma_set_pte_readable(struct dma_pte *pte)
-{
-	pte->val |= DMA_PTE_READ;
-}
-
-static inline void dma_set_pte_writable(struct dma_pte *pte)
-{
-	pte->val |= DMA_PTE_WRITE;
-}
-
-static inline void dma_set_pte_snp(struct dma_pte *pte)
-{
-	pte->val |= DMA_PTE_SNP;
-}
-
-static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
-{
-	pte->val = (pte->val & ~3) | (prot & 3);
-}
-
 static inline u64 dma_pte_addr(struct dma_pte *pte)
 {
 #ifdef CONFIG_64BIT
@@ -318,11 +299,6 @@ static inline u64 dma_pte_addr(struct dma_pte *pte)
 #endif
 }
 
-static inline void dma_set_pte_pfn(struct dma_pte *pte, unsigned long pfn)
-{
-	pte->val |= (uint64_t)pfn << VTD_PAGE_SHIFT;
-}
-
 static inline bool dma_pte_present(struct dma_pte *pte)
 {
 	return (pte->val & 3) != 0;
@@ -406,7 +382,7 @@ struct device_domain_info {
 
 static void flush_unmaps_timeout(unsigned long data);
 
-DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
+static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
 
 #define HIGH_WATER_MARK 250
 struct deferred_flush_tables {
@@ -652,9 +628,7 @@ static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
 	struct dmar_drhd_unit *drhd = NULL;
 	int i;
 
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
+	for_each_active_drhd_unit(drhd) {
 		if (segment != drhd->segment)
 			continue;
 
@@ -865,7 +839,6 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	unsigned int large_page = 1;
 	struct dma_pte *first_pte, *pte;
-	int order;
 
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -890,8 +863,7 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
 
 	} while (start_pfn && start_pfn <= last_pfn);
 
-	order = (large_page - 1) * 9;
-	return order;
+	return min_t(int, (large_page - 1) * 9, MAX_AGAW_PFN_WIDTH);
 }
 
 static void dma_pte_free_level(struct dmar_domain *domain, int level,
@@ -1255,8 +1227,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	unsigned long nlongs;
 
 	ndomains = cap_ndoms(iommu->cap);
-	pr_debug("IOMMU %d: Number of Domains supported <%ld>\n", iommu->seq_id,
-			ndomains);
+	pr_debug("IOMMU%d: Number of Domains supported <%ld>\n",
+		 iommu->seq_id, ndomains);
 	nlongs = BITS_TO_LONGS(ndomains);
 
 	spin_lock_init(&iommu->lock);
@@ -1266,13 +1238,17 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	 */
 	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
 	if (!iommu->domain_ids) {
-		printk(KERN_ERR "Allocating domain id array failed\n");
+		pr_err("IOMMU%d: allocating domain id array failed\n",
+		       iommu->seq_id);
 		return -ENOMEM;
 	}
 	iommu->domains = kcalloc(ndomains, sizeof(struct dmar_domain *),
 			GFP_KERNEL);
 	if (!iommu->domains) {
-		printk(KERN_ERR "Allocating domain array failed\n");
+		pr_err("IOMMU%d: allocating domain array failed\n",
+		       iommu->seq_id);
+		kfree(iommu->domain_ids);
+		iommu->domain_ids = NULL;
 		return -ENOMEM;
 	}
 
@@ -1289,10 +1265,10 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 static void domain_exit(struct dmar_domain *domain);
 static void vm_domain_exit(struct dmar_domain *domain);
 
-void free_dmar_iommu(struct intel_iommu *iommu)
+static void free_dmar_iommu(struct intel_iommu *iommu)
 {
 	struct dmar_domain *domain;
-	int i;
+	int i, count;
 	unsigned long flags;
 
 	if ((iommu->domains) && (iommu->domain_ids)) {
@@ -1301,28 +1277,24 @@ void free_dmar_iommu(struct intel_iommu *iommu)
 			clear_bit(i, iommu->domain_ids);
 
 			spin_lock_irqsave(&domain->iommu_lock, flags);
-			if (--domain->iommu_count == 0) {
+			count = --domain->iommu_count;
+			spin_unlock_irqrestore(&domain->iommu_lock, flags);
+			if (count == 0) {
 				if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE)
 					vm_domain_exit(domain);
 				else
 					domain_exit(domain);
 			}
-			spin_unlock_irqrestore(&domain->iommu_lock, flags);
 		}
 	}
 
 	if (iommu->gcmd & DMA_GCMD_TE)
 		iommu_disable_translation(iommu);
 
-	if (iommu->irq) {
-		irq_set_handler_data(iommu->irq, NULL);
-		/* This will mask the irq */
-		free_irq(iommu->irq, iommu);
-		destroy_irq(iommu->irq);
-	}
-
 	kfree(iommu->domains);
 	kfree(iommu->domain_ids);
+	iommu->domains = NULL;
+	iommu->domain_ids = NULL;
 
 	g_iommus[iommu->seq_id] = NULL;
 
@@ -2245,8 +2217,6 @@ static int __init si_domain_init(int hw)
 	if (!si_domain)
 		return -EFAULT;
 
-	pr_debug("Identity mapping domain is domain %d\n", si_domain->id);
-
 	for_each_active_iommu(iommu, drhd) {
 		ret = iommu_attach_domain(si_domain, iommu);
 		if (ret) {
@@ -2261,6 +2231,8 @@ static int __init si_domain_init(int hw)
 	}
 
 	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+	pr_debug("IOMMU: identity mapping domain is domain %d\n",
+		 si_domain->id);
 
 	if (hw)
 		return 0;
@@ -2492,11 +2464,7 @@ static int __init init_dmars(void)
 		goto error;
 	}
 
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-
-		iommu = drhd->iommu;
+	for_each_active_iommu(iommu, drhd) {
 		g_iommus[iommu->seq_id] = iommu;
 
 		ret = iommu_init_domains(iommu);
@@ -2520,12 +2488,7 @@ static int __init init_dmars(void)
 	/*
 	 * Start from the sane iommu hardware state.
 	 */
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-
-		iommu = drhd->iommu;
-
+	for_each_active_iommu(iommu, drhd) {
 		/*
 		 * If the queued invalidation is already initialized by us
 		 * (for example, while enabling interrupt-remapping) then
@@ -2545,12 +2508,7 @@ static int __init init_dmars(void)
 		dmar_disable_qi(iommu);
 	}
 
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-
-		iommu = drhd->iommu;
-
+	for_each_active_iommu(iommu, drhd) {
 		if (dmar_enable_qi(iommu)) {
 			/*
 			 * Queued Invalidate not enabled, use Register Based
@@ -2633,17 +2591,16 @@ static int __init init_dmars(void)
 	 *    global invalidate iotlb
 	 *    enable translation
 	 */
-	for_each_drhd_unit(drhd) {
+	for_each_iommu(iommu, drhd) {
 		if (drhd->ignored) {
 			/*
 			 * we always have to disable PMRs or DMA may fail on
 			 * this device
 			 */
 			if (force_on)
-				iommu_disable_protect_mem_regions(drhd->iommu);
+				iommu_disable_protect_mem_regions(iommu);
 			continue;
 		}
-		iommu = drhd->iommu;
 
 		iommu_flush_write_buffer(iommu);
 
@@ -2665,12 +2622,9 @@ static int __init init_dmars(void)
 
 	return 0;
 error:
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-		iommu = drhd->iommu;
-		free_iommu(iommu);
-	}
+	for_each_active_iommu(iommu, drhd)
+		free_dmar_iommu(iommu);
+	kfree(deferred_flush);
 	kfree(g_iommus);
 	return ret;
 }
@@ -2758,7 +2712,7 @@ static int iommu_no_mapping(struct device *dev)
 	struct pci_dev *pdev;
 	int found;
 
-	if (unlikely(dev->bus != &pci_bus_type))
+	if (unlikely(!dev_is_pci(dev)))
 		return 1;
 
 	pdev = to_pci_dev(dev);
@@ -3318,9 +3272,9 @@ static void __init init_no_remapping_devices(void)
 		}
 	}
 
-	for_each_drhd_unit(drhd) {
+	for_each_active_drhd_unit(drhd) {
 		int i;
-		if (drhd->ignored || drhd->include_all)
+		if (drhd->include_all)
 			continue;
 
 		for (i = 0; i < drhd->devices_cnt; i++)
@@ -3514,18 +3468,12 @@ static int __init
 rmrr_parse_dev(struct dmar_rmrr_unit *rmrru)
 {
 	struct acpi_dmar_reserved_memory *rmrr;
-	int ret;
 
 	rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr;
-	ret = dmar_parse_dev_scope((void *)(rmrr + 1),
+	return dmar_parse_dev_scope((void *)(rmrr + 1),
 		((void *)rmrr) + rmrr->header.length,
-		&rmrru->devices_cnt, &rmrru->devices, rmrr->segment);
-
-	if (ret || (rmrru->devices_cnt == 0)) {
-		list_del(&rmrru->list);
-		kfree(rmrru);
-	}
-	return ret;
+		&rmrru->devices_cnt, &rmrru->devices,
+		rmrr->segment);
 }
 
 static LIST_HEAD(dmar_atsr_units);
@@ -3550,23 +3498,39 @@ int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr)
 
 static int __init atsr_parse_dev(struct dmar_atsr_unit *atsru)
 {
-	int rc;
 	struct acpi_dmar_atsr *atsr;
 
 	if (atsru->include_all)
 		return 0;
 
 	atsr = container_of(atsru->hdr, struct acpi_dmar_atsr, header);
-	rc = dmar_parse_dev_scope((void *)(atsr + 1),
+	return dmar_parse_dev_scope((void *)(atsr + 1),
 				(void *)atsr + atsr->header.length,
 				&atsru->devices_cnt, &atsru->devices,
 				atsr->segment);
-	if (rc || !atsru->devices_cnt) {
-		list_del(&atsru->list);
-		kfree(atsru);
+}
+
+static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
+{
+	dmar_free_dev_scope(&atsru->devices, &atsru->devices_cnt);
+	kfree(atsru);
+}
+
+static void intel_iommu_free_dmars(void)
+{
+	struct dmar_rmrr_unit *rmrru, *rmrr_n;
+	struct dmar_atsr_unit *atsru, *atsr_n;
+
+	list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
+		list_del(&rmrru->list);
+		dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+		kfree(rmrru);
 	}
 
-	return rc;
+	list_for_each_entry_safe(atsru, atsr_n, &dmar_atsr_units, list) {
+		list_del(&atsru->list);
+		intel_iommu_free_atsr(atsru);
+	}
 }
 
 int dmar_find_matched_atsr_unit(struct pci_dev *dev)
@@ -3610,17 +3574,17 @@ found:
 
 int __init dmar_parse_rmrr_atsr_dev(void)
 {
-	struct dmar_rmrr_unit *rmrr, *rmrr_n;
-	struct dmar_atsr_unit *atsr, *atsr_n;
+	struct dmar_rmrr_unit *rmrr;
+	struct dmar_atsr_unit *atsr;
 	int ret = 0;
 
-	list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) {
+	list_for_each_entry(rmrr, &dmar_rmrr_units, list) {
 		ret = rmrr_parse_dev(rmrr);
 		if (ret)
 			return ret;
 	}
 
-	list_for_each_entry_safe(atsr, atsr_n, &dmar_atsr_units, list) {
+	list_for_each_entry(atsr, &dmar_atsr_units, list) {
 		ret = atsr_parse_dev(atsr);
 		if (ret)
 			return ret;
@@ -3667,8 +3631,9 @@ static struct notifier_block device_nb = {
 
 int __init intel_iommu_init(void)
 {
-	int ret = 0;
+	int ret = -ENODEV;
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 
 	/* VT-d is required for a TXT/tboot launch, so enforce that */
 	force_on = tboot_force_iommu();
@@ -3676,36 +3641,29 @@ int __init intel_iommu_init(void)
 	if (dmar_table_init()) {
 		if (force_on)
 			panic("tboot: Failed to initialize DMAR table\n");
-		return -ENODEV;
+		goto out_free_dmar;
 	}
 
 	/*
 	 * Disable translation if already enabled prior to OS handover.
 	 */
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu;
-
-		if (drhd->ignored)
-			continue;
-
-		iommu = drhd->iommu;
+	for_each_active_iommu(iommu, drhd)
 		if (iommu->gcmd & DMA_GCMD_TE)
 			iommu_disable_translation(iommu);
-	}
 
 	if (dmar_dev_scope_init() < 0) {
 		if (force_on)
 			panic("tboot: Failed to initialize DMAR device scope\n");
-		return -ENODEV;
+		goto out_free_dmar;
 	}
 
 	if (no_iommu || dmar_disabled)
-		return -ENODEV;
+		goto out_free_dmar;
 
 	if (iommu_init_mempool()) {
 		if (force_on)
 			panic("tboot: Failed to initialize iommu memory\n");
-		return -ENODEV;
+		goto out_free_dmar;
 	}
 
 	if (list_empty(&dmar_rmrr_units))
@@ -3717,7 +3675,7 @@ int __init intel_iommu_init(void)
 	if (dmar_init_reserved_ranges()) {
 		if (force_on)
 			panic("tboot: Failed to reserve iommu ranges\n");
-		return -ENODEV;
+		goto out_free_mempool;
 	}
 
 	init_no_remapping_devices();
@@ -3727,9 +3685,7 @@ int __init intel_iommu_init(void)
 		if (force_on)
 			panic("tboot: Failed to initialize DMARs\n");
 		printk(KERN_ERR "IOMMU: dmar init failed\n");
-		put_iova_domain(&reserved_iova_list);
-		iommu_exit_mempool();
-		return ret;
+		goto out_free_reserved_range;
 	}
 	printk(KERN_INFO
 	"PCI-DMA: Intel(R) Virtualization Technology for Directed I/O\n");
@@ -3749,6 +3705,14 @@ int __init intel_iommu_init(void)
 	intel_iommu_enabled = 1;
 
 	return 0;
+
+out_free_reserved_range:
+	put_iova_domain(&reserved_iova_list);
+out_free_mempool:
+	iommu_exit_mempool();
+out_free_dmar:
+	intel_iommu_free_dmars();
+	return ret;
 }
 
 static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
@@ -3877,7 +3841,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
 }
 
 /* domain id for virtual machine, it won't be set in context */
-static unsigned long vm_domid;
+static atomic_t vm_domid = ATOMIC_INIT(0);
 
 static struct dmar_domain *iommu_alloc_vm_domain(void)
 {
@@ -3887,7 +3851,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
 	if (!domain)
 		return NULL;
 
-	domain->id = vm_domid++;
+	domain->id = atomic_inc_return(&vm_domid);
 	domain->nid = -1;
 	memset(domain->iommu_bmp, 0, sizeof(domain->iommu_bmp));
 	domain->flags = DOMAIN_FLAG_VIRTUAL_MACHINE;
@@ -3934,11 +3898,7 @@ static void iommu_free_vm_domain(struct dmar_domain *domain)
 	unsigned long i;
 	unsigned long ndomains;
 
-	for_each_drhd_unit(drhd) {
-		if (drhd->ignored)
-			continue;
-		iommu = drhd->iommu;
-
+	for_each_active_iommu(iommu, drhd) {
 		ndomains = cap_ndoms(iommu->cap);
 		for_each_set_bit(i, iommu->domain_ids, ndomains) {
 			if (iommu->domains[i] == domain) {
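Several of the intel-iommu.c hunks above collapse open-coded walks over dmar_drhd_units (each skipping drhd->ignored entries by hand) into the for_each_iommu()/for_each_active_iommu() iterators. A hedged approximation of what such an iterator expands to (the real macros live in the DMAR headers and may differ in detail; this sketch only conveys the idea):

	/*
	 * Walk every DRHD unit, bind its IOMMU to 'iommu', and - in the
	 * "active" variant - skip units marked ignored during DMAR parsing.
	 */
	#define for_each_active_iommu(iommu, drhd)				\
		list_for_each_entry(drhd, &dmar_drhd_units, list)		\
			if (((iommu) = (drhd)->iommu, (drhd)->ignored)) {} else

Centralising the ignored-unit check is what lets the callers above drop their "if (drhd->ignored) continue;" boilerplate.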
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 0cb7528b30a1..ef5f65dbafe9 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -40,13 +40,15 @@ static int ir_ioapic_num, ir_hpet_num;
 
 static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
 
+static int __init parse_ioapics_under_ir(void);
+
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
 	struct irq_cfg *cfg = irq_get_chip_data(irq);
 	return cfg ? &cfg->irq_2_iommu : NULL;
 }
 
-int get_irte(int irq, struct irte *entry)
+static int get_irte(int irq, struct irte *entry)
 {
 	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	unsigned long flags;
@@ -69,19 +71,13 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	struct ir_table *table = iommu->ir_table;
 	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	struct irq_cfg *cfg = irq_get_chip_data(irq);
-	u16 index, start_index;
 	unsigned int mask = 0;
 	unsigned long flags;
-	int i;
+	int index;
 
 	if (!count || !irq_iommu)
 		return -1;
 
-	/*
-	 * start the IRTE search from index 0.
-	 */
-	index = start_index = 0;
-
 	if (count > 1) {
 		count = __roundup_pow_of_two(count);
 		mask = ilog2(count);
@@ -96,32 +92,17 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	}
 
 	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-	do {
-		for (i = index; i < index + count; i++)
-			if (table->base[i].present)
-				break;
-		/* empty index found */
-		if (i == index + count)
-			break;
-
-		index = (index + count) % INTR_REMAP_TABLE_ENTRIES;
-
-		if (index == start_index) {
-			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-			printk(KERN_ERR "can't allocate an IRTE\n");
-			return -1;
-		}
-	} while (1);
-
-	for (i = index; i < index + count; i++)
-		table->base[i].present = 1;
-
-	cfg->remapped = 1;
-	irq_iommu->iommu = iommu;
-	irq_iommu->irte_index = index;
-	irq_iommu->sub_handle = 0;
-	irq_iommu->irte_mask = mask;
-
+	index = bitmap_find_free_region(table->bitmap,
+					INTR_REMAP_TABLE_ENTRIES, mask);
+	if (index < 0) {
+		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
+	} else {
+		cfg->remapped = 1;
+		irq_iommu->iommu = iommu;
+		irq_iommu->irte_index = index;
+		irq_iommu->sub_handle = 0;
+		irq_iommu->irte_mask = mask;
+	}
 	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
 	return index;
@@ -254,6 +235,8 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
 		set_64bit(&entry->low, 0);
 		set_64bit(&entry->high, 0);
 	}
+	bitmap_release_region(iommu->ir_table->bitmap, index,
+			      irq_iommu->irte_mask);
 
 	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 }
@@ -336,7 +319,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
 		return -1;
 	}
 
-	set_irte_sid(irte, 1, 0, sid);
+	set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, sid);
 
 	return 0;
 }
@@ -453,6 +436,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
 {
 	struct ir_table *ir_table;
 	struct page *pages;
+	unsigned long *bitmap;
 
 	ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table),
 					     GFP_ATOMIC);
@@ -464,13 +448,23 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
 				 INTR_REMAP_PAGE_ORDER);
 
 	if (!pages) {
-		printk(KERN_ERR "failed to allocate pages of order %d\n",
-		       INTR_REMAP_PAGE_ORDER);
+		pr_err("IR%d: failed to allocate pages of order %d\n",
+		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
 		kfree(iommu->ir_table);
 		return -ENOMEM;
 	}
 
+	bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
+			 sizeof(long), GFP_ATOMIC);
+	if (bitmap == NULL) {
+		pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
+		__free_pages(pages, INTR_REMAP_PAGE_ORDER);
+		kfree(ir_table);
+		return -ENOMEM;
+	}
+
 	ir_table->base = page_address(pages);
+	ir_table->bitmap = bitmap;
 
 	iommu_set_irq_remapping(iommu, mode);
 	return 0;
@@ -521,6 +515,7 @@ static int __init dmar_x2apic_optout(void)
 static int __init intel_irq_remapping_supported(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 
 	if (disable_irq_remap)
 		return 0;
@@ -539,12 +534,9 @@ static int __init intel_irq_remapping_supported(void)
 	if (!dmar_ir_support())
 		return 0;
 
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd)
 		if (!ecap_ir_support(iommu->ecap))
 			return 0;
-	}
 
 	return 1;
 }
@@ -552,6 +544,7 @@ static int __init intel_irq_remapping_supported(void)
 static int __init intel_enable_irq_remapping(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 	bool x2apic_present;
 	int setup = 0;
 	int eim = 0;
@@ -564,6 +557,8 @@ static int __init intel_enable_irq_remapping(void)
 	}
 
 	if (x2apic_present) {
+		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+
 		eim = !dmar_x2apic_optout();
 		if (!eim)
 			printk(KERN_WARNING
@@ -572,9 +567,7 @@ static int __init intel_enable_irq_remapping(void)
572 "Use 'intremap=no_x2apic_optout' to override BIOS request.\n"); 567 "Use 'intremap=no_x2apic_optout' to override BIOS request.\n");
573 } 568 }
574 569
575 for_each_drhd_unit(drhd) { 570 for_each_iommu(iommu, drhd) {
576 struct intel_iommu *iommu = drhd->iommu;
577
578 /* 571 /*
579 * If the queued invalidation is already initialized, 572 * If the queued invalidation is already initialized,
580 * shouldn't disable it. 573 * shouldn't disable it.
@@ -599,9 +592,7 @@ static int __init intel_enable_irq_remapping(void)
 	/*
 	 * check for the Interrupt-remapping support
 	 */
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd) {
 		if (!ecap_ir_support(iommu->ecap))
 			continue;
 
@@ -615,10 +606,8 @@ static int __init intel_enable_irq_remapping(void)
 	/*
 	 * Enable queued invalidation for all the DRHD's.
 	 */
-	for_each_drhd_unit(drhd) {
-		int ret;
-		struct intel_iommu *iommu = drhd->iommu;
-		ret = dmar_enable_qi(iommu);
+	for_each_iommu(iommu, drhd) {
+		int ret = dmar_enable_qi(iommu);
 
 		if (ret) {
 			printk(KERN_ERR "DRHD %Lx: failed to enable queued, "
@@ -631,9 +620,7 @@ static int __init intel_enable_irq_remapping(void)
 	/*
 	 * Setup Interrupt-remapping for all the DRHD's now.
 	 */
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd) {
 		if (!ecap_ir_support(iommu->ecap))
 			continue;
 
@@ -774,22 +761,20 @@ static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
  * Finds the assocaition between IOAPIC's and its Interrupt-remapping
  * hardware unit.
  */
-int __init parse_ioapics_under_ir(void)
+static int __init parse_ioapics_under_ir(void)
 {
 	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
 	int ir_supported = 0;
 	int ioapic_idx;
 
-	for_each_drhd_unit(drhd) {
-		struct intel_iommu *iommu = drhd->iommu;
-
+	for_each_iommu(iommu, drhd)
 		if (ecap_ir_support(iommu->ecap)) {
 			if (ir_parse_ioapic_hpet_scope(drhd->hdr, iommu))
 				return -1;
 
 			ir_supported = 1;
 		}
-	}
 
 	if (!ir_supported)
 		return 0;
@@ -807,7 +792,7 @@ int __init parse_ioapics_under_ir(void)
 	return 1;
 }
 
-int __init ir_dev_scope_init(void)
+static int __init ir_dev_scope_init(void)
 {
 	if (!irq_remapping_enabled)
 		return 0;
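The alloc_irte()/clear_entries() changes above drop the linear scan of ir_table->base[] in favour of a per-IOMMU allocation bitmap. A minimal stand-alone sketch of the order-based bitmap API those hunks rely on (table size and helper names here are illustrative, not taken from the driver):

	#include <linux/bitmap.h>
	#include <linux/log2.h>
	#include <linux/slab.h>

	#define EXAMPLE_TABLE_ENTRIES	65536	/* illustrative, stands in for INTR_REMAP_TABLE_ENTRIES */

	/* allocated elsewhere, e.g. kcalloc(BITS_TO_LONGS(EXAMPLE_TABLE_ENTRIES), sizeof(long), GFP_KERNEL) */
	static unsigned long *example_bitmap;

	static int example_alloc_entries(unsigned int count)
	{
		/* bitmap_find_free_region() hands out naturally aligned blocks of 2^order bits */
		int order = ilog2(__roundup_pow_of_two(count));

		/* returns the first index of such a free block and marks it busy, or <0 on failure */
		return bitmap_find_free_region(example_bitmap, EXAMPLE_TABLE_ENTRIES, order);
	}

	static void example_free_entries(int index, int order)
	{
		/* releases exactly the block example_alloc_entries() handed out */
		bitmap_release_region(example_bitmap, index, order);
	}

Because allocation and release both operate on whole power-of-two regions, the driver only has to remember the starting index and the order, which is what irq_iommu->irte_index and irq_iommu->irte_mask store.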
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 39f81aeefcd6..228632c99adb 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -150,7 +150,7 @@ static int irq_remapping_setup_msi_irqs(struct pci_dev *dev,
 		return do_setup_msix_irqs(dev, nvec);
 }
 
-void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
+static void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
 {
 	/*
 	 * Intr-remapping uses pin number as the virtual vector
@@ -295,8 +295,8 @@ int setup_ioapic_remapped_entry(int irq,
 					     vector, attr);
 }
 
-int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
-			      bool force)
+static int set_remapped_irq_affinity(struct irq_data *data,
+				     const struct cpumask *mask, bool force)
 {
 	if (!config_enabled(CONFIG_SMP) || !remap_ops ||
 	    !remap_ops->set_affinity)
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index ee249bc959f8..e550ccb7634e 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -20,6 +20,7 @@
 #include <linux/export.h>
 #include <linux/limits.h>
 #include <linux/of.h>
+#include <linux/of_iommu.h>
 
 /**
  * of_get_dma_window - Parse *dma-window property and returns 0 if found.
diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c
index d572863dfccd..7a3b928fad1c 100644
--- a/drivers/iommu/shmobile-iommu.c
+++ b/drivers/iommu/shmobile-iommu.c
@@ -380,14 +380,13 @@ int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
 		kmem_cache_destroy(l1cache);
 		return -ENOMEM;
 	}
-	archdata = kmalloc(sizeof(*archdata), GFP_KERNEL);
+	archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
 	if (!archdata) {
 		kmem_cache_destroy(l1cache);
 		kmem_cache_destroy(l2cache);
 		return -ENOMEM;
 	}
 	spin_lock_init(&archdata->attach_lock);
-	archdata->attached = NULL;
 	archdata->ipmmu = ipmmu;
 	ipmmu_archdata = archdata;
 	bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);
diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c
index 8321f89596c4..e3bc2e19b6dd 100644
--- a/drivers/iommu/shmobile-ipmmu.c
+++ b/drivers/iommu/shmobile-ipmmu.c
@@ -35,12 +35,12 @@ void ipmmu_tlb_flush(struct shmobile_ipmmu *ipmmu)
 	if (!ipmmu)
 		return;
 
-	mutex_lock(&ipmmu->flush_lock);
+	spin_lock(&ipmmu->flush_lock);
 	if (ipmmu->tlb_enabled)
 		ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH | IMCTR1_TLBEN);
 	else
 		ipmmu_reg_write(ipmmu, IMCTR1, IMCTR1_FLUSH);
-	mutex_unlock(&ipmmu->flush_lock);
+	spin_unlock(&ipmmu->flush_lock);
 }
 
 void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
@@ -49,7 +49,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
 	if (!ipmmu)
 		return;
 
-	mutex_lock(&ipmmu->flush_lock);
+	spin_lock(&ipmmu->flush_lock);
 	switch (size) {
 	default:
 		ipmmu->tlb_enabled = 0;
@@ -85,7 +85,7 @@ void ipmmu_tlb_set(struct shmobile_ipmmu *ipmmu, unsigned long phys, int size,
 	}
 	ipmmu_reg_write(ipmmu, IMTTBR, phys);
 	ipmmu_reg_write(ipmmu, IMASID, asid);
-	mutex_unlock(&ipmmu->flush_lock);
+	spin_unlock(&ipmmu->flush_lock);
 }
 
 static int ipmmu_probe(struct platform_device *pdev)
@@ -104,7 +104,7 @@ static int ipmmu_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "cannot allocate device data\n");
 		return -ENOMEM;
 	}
-	mutex_init(&ipmmu->flush_lock);
+	spin_lock_init(&ipmmu->flush_lock);
 	ipmmu->dev = &pdev->dev;
 	ipmmu->ipmmu_base = devm_ioremap_nocache(&pdev->dev, res->start,
 						 resource_size(res));
diff --git a/drivers/iommu/shmobile-ipmmu.h b/drivers/iommu/shmobile-ipmmu.h
index 4d53684673e1..9524743ca1fb 100644
--- a/drivers/iommu/shmobile-ipmmu.h
+++ b/drivers/iommu/shmobile-ipmmu.h
@@ -14,7 +14,7 @@ struct shmobile_ipmmu {
 	struct device *dev;
 	void __iomem *ipmmu_base;
 	int tlb_enabled;
-	struct mutex flush_lock;
+	spinlock_t flush_lock;
 	const char * const *dev_names;
 	unsigned int num_dev_names;
 };